Unverified commit 9b611ea2, authored by Hao Lin, committed by GitHub

opt dygraph python code for 215 unchecked calls (#34024)

* opt dygraph python API, test=develop

* Fix unbind bug in manipulation.py
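The recurring change in the diffs below is the same everywhere: hoist the `in_dygraph_mode()` fast path above the Python-side argument checks and `LayerHelper` construction, which only the static-graph branch needs. A minimal sketch of the resulting shape, under the 2021-era `paddle.fluid` layout; `some_op` is hypothetical, not a real Paddle operator:

```python
# Hedged sketch of the pattern this commit applies; `some_op` is hypothetical.
from paddle.fluid import core
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_variable_and_dtype

def some_op(x, attr):
    if in_dygraph_mode():
        # Eager fast path: dispatch straight to the C++ op and skip all
        # Python-side validation below.
        return core.ops.some_op(x, 'attr', attr)

    # Static-graph path only: validate inputs, then build the op into the
    # program via LayerHelper.
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'some_op')
    helper = LayerHelper('some_op', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='some_op', inputs={'X': x}, attrs={'attr': attr},
        outputs={'Out': out})
    return out
```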
Parent 97faf90e
@@ -322,7 +322,6 @@ class Uniform(Distribution):
           Tensor: log probability. The data type is the same as value.
         """
-        name = self.name + '_log_prob'
         value = self._check_values_dtype_in_probs(self.low, value)
         if in_dygraph_mode():
             # ensure value in [low, high]
@@ -335,6 +334,7 @@ class Uniform(Distribution):
                                  value.dtype)
             return nn.log(lb * ub) - nn.log(self.high - self.low)
+        name = self.name + '_log_prob'
         lb_bool = self.low < value
         ub_bool = value < self.high
         lb = tensor.cast(lb_bool, dtype=value.dtype)
@@ -352,7 +352,6 @@ class Uniform(Distribution):
           Tensor: probability. The data type is the same as value.
         """
-        name = self.name + '_probs'
         value = self._check_values_dtype_in_probs(self.low, value)
         if in_dygraph_mode():
             lb_bool = self.low < value
@@ -364,6 +363,7 @@ class Uniform(Distribution):
                                  value.dtype)
             return (lb * ub) / (self.high - self.low)
+        name = self.name + '_probs'
         lb_bool = self.low < value
         ub_bool = value < self.high
         lb = tensor.cast(lb_bool, dtype=value.dtype)
......
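A usage sketch of the reordered `log_prob` under eager execution (assuming Paddle 2.x): the dygraph branch now returns before the `'_log_prob'` name is ever built.

```python
import paddle
from paddle.distribution import Uniform

u = Uniform(low=0.0, high=2.0)
value = paddle.to_tensor([0.5, 1.5])
# Inside [low, high): log(1 * 1) - log(high - low) = -log(2) ≈ -0.693
print(u.log_prob(value).numpy())
```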
@@ -1538,19 +1538,18 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
             output = fluid.contrib.bilateral_slice(x, guide, grid, has_offset=True)
     """
-    helper = LayerHelper("bilateral_slice", **locals())
+    if paddle.fluid.in_dygraph_mode():
+        attrs = ('has_offset', has_offset)
+        return getattr(core.ops, "bilateral_slice")(x, grid, guide, *attrs)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'bilateral_slice')
     check_variable_and_dtype(guide, 'guide', ['float32', 'float64'],
                              'bilateral_slice')
     check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
                              'bilateral_slice')
+    helper = LayerHelper("bilateral_slice", **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     inputs = {'X': x, 'Guide': guide, 'Grid': grid}
-    if paddle.fluid.in_dygraph_mode():
-        attrs = ('has_offset', has_offset)
-        return getattr(core.ops, "bilateral_slice")(x, grid, guide, *attrs)
     helper.append_op(
         type='bilateral_slice',
         inputs=inputs,
@@ -1613,14 +1612,14 @@ def correlation(x,
     """
-    helper = LayerHelper("correlation", **locals())
-    output = helper.create_variable_for_type_inference(dtype=x.dtype)
     if paddle.fluid.in_dygraph_mode():
         attrs = ("pad_size", pad_size, "kernel_size", kernel_size,
                  "max_displacement", max_displacement, "stride1", stride1,
                  "stride2", stride2, "corr_type_multiply", corr_type_multiply)
         output = getattr(core.ops, "correlation")(x, y, *attrs)
     else:
+        helper = LayerHelper("correlation", **locals())
+        output = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type="correlation",
             inputs={"Input1": x,
......
@@ -200,10 +200,6 @@ class Momentum(Optimizer):
         velocity_acc = self._get_accumulator(self._velocity_acc_str,
                                              param_and_grad[0])
-        find_master = self._multi_precision and param_and_grad[
-            0].dtype == core.VarDesc.VarType.FP16
-        master_weight = (self._master_weights[param_and_grad[0].name]
-                         if find_master else None)
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
@@ -215,6 +211,10 @@ class Momentum(Optimizer):
                 self._regularization_coeff)
             return None
+        find_master = self._multi_precision and param_and_grad[
+            0].dtype == core.VarDesc.VarType.FP16
+        master_weight = (self._master_weights[param_and_grad[0].name]
+                         if find_master else None)
         attrs = {
             "mu": self._momentum,
             "use_nesterov": self._use_nesterov,
......
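For reference, a plain dygraph training step (assuming Paddle 2.x) that exercises the fast path above; the `find_master`/`master_weight` lookup is now computed only when building the static-graph op:

```python
import paddle

linear = paddle.nn.Linear(2, 2)
opt = paddle.optimizer.Momentum(
    learning_rate=0.1, momentum=0.9, parameters=linear.parameters())
loss = linear(paddle.rand([4, 2])).mean()
loss.backward()
opt.step()        # eager: returns from the core.ops call, no attrs dict built
opt.clear_grad()
```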
@@ -3945,8 +3945,6 @@ def collect_fpn_proposals(multi_rois,
             max_level=5,
             post_nms_top_n=2000)
     """
-    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
-    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
     num_lvl = max_level - min_level + 1
     input_rois = multi_rois[:num_lvl]
     input_scores = multi_scores[:num_lvl]
@@ -3957,6 +3955,8 @@ def collect_fpn_proposals(multi_rois,
         output_rois, rois_num = core.ops.collect_fpn_proposals(
             input_rois, input_scores, rois_num_per_level, *attrs)
+    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
+    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
     helper = LayerHelper('collect_fpn_proposals', **locals())
     dtype = helper.input_dtype('multi_rois')
     check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
......
@@ -914,6 +914,9 @@ class Optimizer(object):
         assert regularization_term is not None
+        if framework.in_dygraph_mode():
+            return core.ops.sum([grad, regularization_term])
         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
             # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
@@ -929,10 +932,7 @@ class Optimizer(object):
         inputs = {"X": [grad, regularization_term]}
         outputs = {"Out": [new_grad]}
-        if framework.in_dygraph_mode():
-            new_grad = core.ops.sum([grad, regularization_term])
-        else:
-            grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
+        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
         return new_grad
......
@@ -132,9 +132,6 @@ class L2DecayRegularizer(WeightDecayRegularizer):
         assert isinstance(param, framework.Variable)
         assert isinstance(block, framework.Block)
-        inputs = {"X": [param]}
-        attrs = {"scale": self._regularization_coeff}
         if framework.in_dygraph_mode():
             return core.ops.scale(param, "scale", self._regularization_coeff)
         else:
......
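A numeric sketch of what the dygraph branch computes: `core.ops.scale(param, "scale", coeff)` is just `coeff * param`, which the optimizer's regularization sum (shown above) then folds into the gradient, i.e. `grad + coeff * param`:

```python
import paddle

param = paddle.to_tensor([2.0])
grad = paddle.to_tensor([0.5])
coeff = 0.1
# L2 decay term summed into the gradient: grad + coeff * param
print((grad + coeff * param).numpy())  # [0.7]
```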
@@ -432,7 +432,6 @@ def prelu(x, weight, name=None):
     check_variable_and_dtype(weight, 'weight',
                              ['float16', 'float32', 'float64'], 'prelu')
-    helper = LayerHelper('prelu', **locals())
     assert len(weight.shape
                ) == 1, "The dim count of weight shape should be 1 in prelu()."
@@ -450,6 +449,7 @@ def prelu(x, weight, name=None):
     if in_dygraph_mode():
         return core.ops.prelu(x, weight, 'mode', mode)
+    helper = LayerHelper('prelu', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
         type="prelu",
......
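Usage sketch (assuming Paddle 2.x): with a single-element weight, prelu scales only the negative inputs, and in dygraph mode the call now returns before the `LayerHelper` is created:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[-2.0, 3.0]])
weight = paddle.to_tensor([0.25])   # one element -> applied to all channels
print(F.prelu(x, weight).numpy())   # [[-0.5  3. ]]
```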
@@ -453,13 +453,13 @@ def interpolate(x,
         if resample_type == "linear":
             out = core.ops.linear_interp_v2(x, *dy_attr)
-        if resample_type == "bilinear":
+        elif resample_type == "bilinear":
             out = core.ops.bilinear_interp_v2(x, *dy_attr)
-        if resample_type == "trilinear":
+        elif resample_type == "trilinear":
             out = core.ops.trilinear_interp_v2(x, *dy_attr)
-        if resample_type == "nearest":
+        elif resample_type == "nearest":
             out = core.ops.nearest_interp_v2(x, *dy_attr)
-        if resample_type == "bicubic":
+        elif resample_type == "bicubic":
             out = core.ops.bicubic_interp_v2(x, *dy_attr)
         return out
     out = helper.create_variable_for_type_inference(dtype)
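The `if` → `elif` change is a small but real saving: the branches are mutually exclusive, so once one matches, the remaining string comparisons are wasted work. A minimal self-contained illustration:

```python
resample_type = "bilinear"
checks = 0

def eq(a, b):
    # Count how many comparisons actually run.
    global checks
    checks += 1
    return a == b

if eq(resample_type, "linear"):
    pass
elif eq(resample_type, "bilinear"):   # matches; later branches never run
    pass
elif eq(resample_type, "trilinear"):
    pass
print(checks)  # 2 -- a chain of plain `if`s would run every comparison
```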
@@ -881,18 +881,6 @@ def dropout(x,
             seed = None
     mode = 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode  # semantic transfer
-    def get_attrs(prog, dropout_prob, is_test, seed):
-        if (seed is None or seed == 0) and prog.random_seed != 0:
-            seed = prog.random_seed
-        attrs = {
-            'dropout_prob': dropout_prob,
-            'is_test': is_test,
-            'fix_seed': seed is not None,
-            'seed': seed if seed is not None else 0,
-            'dropout_implementation': mode,
-        }
-        return attrs
-
     if in_dygraph_mode():
         if default_main_program().random_seed != 0:
             seed = default_main_program().random_seed
@@ -910,6 +898,18 @@ def dropout(x,
     mask = helper.create_variable_for_type_inference(
         dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)
+    def get_attrs(prog, dropout_prob, is_test, seed):
+        if (seed is None or seed == 0) and prog.random_seed != 0:
+            seed = prog.random_seed
+        attrs = {
+            'dropout_prob': dropout_prob,
+            'is_test': is_test,
+            'fix_seed': seed is not None,
+            'seed': seed if seed is not None else 0,
+            'dropout_implementation': mode,
+        }
+        return attrs
+
     attrs = get_attrs(helper.main_program, p, not training, seed)
     helper.append_op(
......
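Moving the `get_attrs` definition below the dygraph branch matters because Python executes a `def` statement on every call to the enclosing function; the eager path was paying to build a function object it never used. A rough, self-contained micro-benchmark of that effect (timings will vary by machine):

```python
import timeit

def with_nested_def():
    def get_attrs():      # function object built on every call, even if unused
        return {}
    return None

def without_nested_def():
    return None

print(timeit.timeit(with_nested_def, number=1_000_000))
print(timeit.timeit(without_nested_def, number=1_000_000))
```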
@@ -109,7 +109,6 @@ def _conv_nd(x,
              name=None):
     # Due to the poor performance of NHWC, we transpose the input to NCHW.
     origin_format = data_format
     if in_dygraph_mode():
         attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation,
                  'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn',
@@ -332,18 +331,6 @@ def conv1d(x,
             l_type = 'depthwise_conv2d'
             use_cudnn = False
-    inputs = {'Input': [x], 'Filter': [weight]}
-    attrs = {
-        'strides': stride,
-        'paddings': padding,
-        'dilations': dilation,
-        'groups': groups,
-        'use_cudnn': use_cudnn,
-        'use_mkldnn': False,
-        'fuse_relu_before_depthwise_conv': False,
-        "padding_algorithm": padding_algorithm,
-        "data_format": conv2d_data_format
-    }
     squeeze_aixs = -2 if channel_last else -1
     x = nn.unsqueeze(input=x, axes=[squeeze_aixs])
     weight = nn.unsqueeze(input=weight, axes=[-1])
......
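Usage sketch for the conv1d path above (assuming Paddle 2.x): in dygraph mode the `inputs`/`attrs` dicts are no longer built before dispatch.

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 4, 16])   # NCL layout
w = paddle.rand([8, 4, 3])    # [out_channels, in_channels/groups, kernel]
y = F.conv1d(x, w, stride=1, padding=1)
print(y.shape)                # [1, 8, 16]
```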
@@ -196,7 +196,8 @@ def avg_pool1d(x,
     """
     """NCL to NCHW"""
     data_format = "NCHW"
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
+    if not in_dygraph_mode():
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool1d')
     _check_input(x, 3)
     x = unsqueeze(x, [2])
     kernel_size = utils.convert_to_list(kernel_size, 1, 'kernel_size')
@@ -315,7 +316,6 @@ def avg_pool2d(x,
                 stride=2, padding=0)
           # out.shape [1, 3, 16, 16]
     """
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
     if stride is None:
         stride = kernel_size
@@ -341,6 +341,7 @@ def avg_pool2d(x,
     op_type = 'pool2d'
     helper = LayerHelper(op_type, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -434,7 +435,6 @@ def avg_pool3d(x,
                 padding=0)
           # out.shape: [1, 3, 16, 16, 16]
     """
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
     kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
     if stride is None:
         stride = kernel_size
@@ -461,6 +461,7 @@ def avg_pool3d(x,
     op_type = "pool3d"
     helper = LayerHelper(op_type, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     outputs = {"Out": pool_out}
@@ -547,7 +548,8 @@ def max_pool1d(x,
     """
     """NCL to NCHW"""
     data_format = "NCHW"
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
+    if not in_dygraph_mode():
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool1d')
     _check_input(x, 3)
     x = unsqueeze(x, [2])
     kernel_size = [1] + utils.convert_to_list(kernel_size, 1, 'pool_size')
@@ -679,8 +681,6 @@ def max_pool2d(x,
                 return_mask=True)
           # out.shape [1, 3, 16, 16], max_indices.shape [1, 3, 16, 16],
     """
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                             'max_pool2d')
     kernel_size = utils.convert_to_list(kernel_size, 2, 'pool_size')
     if stride is None:
         stride = kernel_size
@@ -722,6 +722,8 @@ def max_pool2d(x,
     op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
     helper = LayerHelper(op_type, **locals())
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'max_pool2d')
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
@@ -815,7 +817,6 @@ def max_pool3d(x,
                 return_mask=True)
           # output.shape [None, 3, 16, 16, 16], max_indices.shape [None, 3, 16, 16, 16],
     """
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
     kernel_size = utils.convert_to_list(kernel_size, 3, 'pool_size')
     if stride is None:
         stride = kernel_size
@@ -852,6 +853,7 @@ def max_pool3d(x,
     op_type = "max_pool3d_with_index" if return_mask else "pool3d"
     helper = LayerHelper(op_type, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
     mask = helper.create_variable_for_type_inference(dtype)
@@ -921,20 +923,21 @@ def adaptive_avg_pool1d(x, output_size, name=None):
           # pool_out shape: [1, 3, 16])
     """
     pool_type = 'avg'
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                             'adaptive_pool2d')
+    if not in_dygraph_mode():
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                 'adaptive_pool2d')
+        check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
     _check_input(x, 3)
-    check_type(output_size, 'pool_size', (int), 'adaptive_pool1d')
     pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
-    l_type = "pool2d"
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
         pool_out = core.ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
                                    pool_size, 'adaptive', True)
         return squeeze(pool_out, [2])
+    l_type = "pool2d"
     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -1006,7 +1009,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
     if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                  'adaptive_avg_pool2d')
-    check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
+        check_type(data_format, 'data_format', str, 'adaptive_avg_pool2d')
     if data_format not in ["NCHW", "NHWC"]:
         raise ValueError(
@@ -1110,7 +1113,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
     if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                  'adaptive_avg_pool3d')
-    check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')
+        check_type(data_format, 'data_format', str, 'adaptive_avg_pool3d')
     if data_format not in ["NCDHW", "NDHWC"]:
         raise ValueError(
@@ -1207,16 +1210,15 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
           # pool_out shape: [1, 3, 16] indices shape: [1, 3, 16]
     """
     pool_type = 'max'
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'],
-                             'adaptive_max_pool1d')
+    if not in_dygraph_mode():
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
+                                 'adaptive_max_pool1d')
+        check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
     _check_input(x, 3)
-    check_type(output_size, 'pool_size', int, 'adaptive_max_pool1d')
-    check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool1d')
     pool_size = [1] + utils.convert_to_list(output_size, 1, 'pool_size')
-    l_type = 'max_pool2d_with_index'
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
         pool_out = core.ops.max_pool2d_with_index(
@@ -1224,6 +1226,8 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
         return (squeeze(pool_out[0], [2]), squeeze(
             pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])
+    l_type = 'max_pool2d_with_index'
     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype(input_param_name='x')
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -1291,9 +1295,9 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
     if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                  'adaptive_max_pool2d')
-        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
-        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
         _check_input(x, 4)
+        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool2d')
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool2d')
     in_h, in_w = x.shape[2:4]
     if isinstance(output_size, int):
@@ -1382,9 +1386,9 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
     if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                  'adaptive_max_pool3d')
-        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
-        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
         _check_input(x, 5)
+        #check_type(output_size, 'pool_size', (int), 'adaptive_max_pool3d')
+        check_type(return_mask, 'return_mask', bool, 'adaptive_max_pool3d')
     in_l, in_h, in_w = x.shape[2:5]
     if isinstance(output_size, int):
......
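All of the pooling changes above follow the same two moves: wrap the `check_*` calls in `if not in_dygraph_mode():` (or push them after the early return), and defer `l_type`/`LayerHelper` work to the static branch. A quick eager-mode usage sketch (assuming Paddle 2.x):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 32])  # NCL
print(F.avg_pool1d(x, kernel_size=2, stride=2).shape)  # [1, 3, 16]
print(F.max_pool1d(x, kernel_size=2, stride=2).shape)  # [1, 3, 16]
```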
@@ -73,12 +73,9 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
           #  [-0.16666666  1.9000001 ]
           #  [-0.43333334  2.2333333 ]]]]
     """
-    helper = LayerHelper('affine_grid')
     if not isinstance(theta, Variable):
         raise ValueError("The theta should be a Tensor.")
-    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
-                             'affine_grid')
     cudnn_version = get_cudnn_version()
     if cudnn_version is not None and cudnn_version >= 6000 and align_corners:
         use_cudnn = True
@@ -98,6 +95,9 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
                                  "align_corners", align_corners, "use_cudnn",
                                  use_cudnn)
+    helper = LayerHelper('affine_grid')
+    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
+                             'affine_grid')
     out = helper.create_variable_for_type_inference(theta.dtype)
     ipts = {'Theta': theta}
     attrs = {"align_corners": align_corners, "use_cudnn": use_cudnn}
@@ -243,10 +243,6 @@ def grid_sample(x,
           #  [ 0.55  -0.076  0.35   0.59 ]
           #  [ 0.596  0.38   0.52   0.24 ]]]]
     """
-    helper = LayerHelper("grid_sample", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
-    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
-                             'grid_sample')
     _modes = ['bilinear', 'nearest']
     _padding_modes = ['zeros', 'reflection', 'border']
@@ -272,19 +268,23 @@ def grid_sample(x,
         # CUDNN always computes gradients for all inputs
         x.stop_gradient = False
         grid.stop_gradient = False
-    ipts = {'X': x, 'Grid': grid}
-    attrs = {
-        'mode': mode,
-        'padding_mode': padding_mode,
-        'align_corners': align_corners,
-        'use_cudnn': use_cudnn
-    }
     if in_dygraph_mode():
         attrs = ('mode', mode, 'padding_mode', padding_mode, 'align_corners',
                  align_corners, 'use_cudnn', use_cudnn)
         out = getattr(core.ops, 'grid_sampler')(x, grid, *attrs)
     else:
+        helper = LayerHelper("grid_sample", **locals())
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
+        check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
+                                 'grid_sample')
+        ipts = {'X': x, 'Grid': grid}
+        attrs = {
+            'mode': mode,
+            'padding_mode': padding_mode,
+            'align_corners': align_corners,
+            'use_cudnn': use_cudnn
+        }
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
             type='grid_sampler',
@@ -319,10 +319,6 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
         out = out_var.numpy()
         # (2, 1, 12, 12)
     """
-    if not in_dygraph_mode():
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
-                                 'pixel_shuffle')
     if not isinstance(upscale_factor, int):
         raise TypeError("upscale factor must be int type")
@@ -336,7 +332,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
                                      "data_format", data_format)
     helper = LayerHelper("pixel_shuffle", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="pixel_shuffle",
......
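Eager usage sketch for the vision ops above (assuming Paddle 2.x); in dygraph mode `grid_sample` now skips both the dtype checks and the `attrs` dict:

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 1, 3, 3])
grid = paddle.rand([1, 3, 3, 2]) * 2 - 1  # sampling locations in [-1, 1]
out = F.grid_sample(x, grid, mode='bilinear', align_corners=True)
print(out.shape)  # [1, 1, 3, 3]
```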
@@ -910,6 +910,9 @@ class Optimizer(object):
         assert regularization_term is not None
+        if framework.in_dygraph_mode():
+            return core.ops.sum([grad, regularization_term])
         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
             # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
@@ -925,10 +928,7 @@ class Optimizer(object):
         inputs = {"X": [grad, regularization_term]}
         outputs = {"Out": [new_grad]}
-        if framework.in_dygraph_mode():
-            new_grad = core.ops.sum([grad, regularization_term])
-        else:
-            grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
+        grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
         return new_grad
......
@@ -832,9 +832,11 @@ def bmm(x, y, name=None):
         raise ValueError(
             "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
             format(x_shape, y_shape))
-    helper = LayerHelper('bmm', **locals())
     if in_dygraph_mode():
         return core.ops.bmm(x, y)
+    helper = LayerHelper('bmm', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
     return out
......
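Shape sketch for bmm (assuming Paddle 2.x): the batch dimensions must match, and the eager call now goes straight to `core.ops.bmm` without creating a `LayerHelper`:

```python
import paddle

x = paddle.rand([2, 3, 4])
y = paddle.rand([2, 4, 5])
print(paddle.bmm(x, y).shape)  # [2, 3, 5]
```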
@@ -190,7 +190,7 @@ def broadcast_tensors(input, name=None):
                 last_index = output_shape_r_last_tensor_index[i]
                 raise TypeError(
                     "Input tensors to broadcast_tensors does not follow bcast semantics"
-                    f"Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
+                    "Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
                 )
             if output_shape_r[i] <= shape[i]:
                 output_shape_r[i] = shape[i]
@@ -339,10 +339,10 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if not (isinstance(x, Variable)):
         raise ValueError("The input x should be a Tensor")
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
-        'flatten')
-    helper = LayerHelper('flatten', **locals())
+    if not in_dygraph_mode():
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
+            'flatten')
     x_dim = len(x.shape)
     if not (isinstance(start_axis, int)) or (
@@ -365,6 +365,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
             x, 'start_axis', start_axis, 'stop_axis', stop_axis)
         return dy_out
+    helper = LayerHelper('flatten', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     x_shape = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
@@ -442,7 +443,6 @@ def roll(x, shifts, axis=None, name=None):
             #  [1. 2. 3.]
             #  [4. 5. 6.]]
     """
-    helper = LayerHelper("roll", **locals())
     origin_shape = x.shape
     if type(shifts) == int:
         shifts = [shifts]
@@ -456,17 +456,15 @@ def roll(x, shifts, axis=None, name=None):
                 raise ValueError(
                     "axis is out of range, it should be in range [{}, {}), but received {}".
                     format(-len_origin_shape, len_origin_shape, axis))
-    if axis:
-        check_type(axis, 'axis', (list, tuple), 'roll')
-    else:
-        axis = []
-    check_type(shifts, 'shifts', (list, tuple), 'roll')
     if in_dygraph_mode():
         return core.ops.roll(x, 'axis', axis, 'shifts', shifts)
+    helper = LayerHelper("roll", **locals())
+    check_type(axis, 'axis', (list, tuple), 'roll')
+    check_type(shifts, 'shifts', (list, tuple), 'roll')
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
@@ -1017,11 +1015,6 @@ def unbind(input, axis=0):
         # x3.shape [3, 5]
     """
-    helper = LayerHelper("unbind", **locals())
-    check_type(input, 'input', (Variable), 'unbind')
-    dtype = helper.input_dtype()
-    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
-                'unbind')
     if not isinstance(axis, (int)):
         raise TypeError("The type of 'axis' must be int, but received %s." %
                         (type(axis)))
@@ -1030,13 +1023,18 @@ def unbind(input, axis=0):
     input_shape = input.shape
     axis_ = axis if axis >= 0 else len(input_shape) + axis
     num = input_shape[axis_]
+    if in_dygraph_mode():
+        return core.ops.unbind(input, num, 'axis', axis)
+    helper = LayerHelper("unbind", **locals())
+    check_type(input, 'input', (Variable), 'unbind')
+    dtype = helper.input_dtype()
+    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
+                'unbind')
     outs = [
         helper.create_variable_for_type_inference(dtype=helper.input_dtype())
         for i in range(num)
     ]
-    if in_dygraph_mode():
-        return core.ops.unbind(input, num, 'axis', axis)
     helper.append_op(
         type="unbind",
         inputs={"X": input},
......
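This is the bug fix called out in the commit message: the dygraph return in `unbind` now fires before the `LayerHelper`/`outs` bookkeeping instead of after it. Eager usage sketch (assuming Paddle 2.x):

```python
import paddle

x = paddle.arange(6, dtype='float32').reshape([2, 3])
x0, x1 = paddle.unbind(x, axis=0)  # eager path: no static-graph bookkeeping
print(x0.numpy())  # [0. 1. 2.]
print(x1.numpy())  # [3. 4. 5.]
```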
@@ -159,7 +159,6 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         )
     var_dtype = convert_np_dtype_to_dtype_(dtype)
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
     flatten = False
     if axis is None:
         flatten = True
@@ -174,6 +173,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
         'paddle.argmax')
+    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
     attrs = {}
     out = helper.create_variable_for_type_inference(var_dtype)
     attrs['keepdims'] = keepdim
@@ -236,7 +236,6 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
         )
     var_dtype = convert_np_dtype_to_dtype_(dtype)
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
     flatten = False
     if axis is None:
         flatten = True
@@ -251,6 +250,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
         'paddle.argmin')
+    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
     out = helper.create_variable_for_type_inference(var_dtype)
     attrs = {}
     attrs['keepdims'] = keepdim
......
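Usage sketch for the reordered argmax/argmin (assuming Paddle 2.x); note the diff only moves the `check_dtype` line and keeps the pre-existing 'argmin' label inside argmax's check:

```python
import paddle

x = paddle.to_tensor([[1.0, 9.0, 3.0],
                      [4.0, 2.0, 8.0]])
print(paddle.argmax(x, axis=1).numpy())  # [1 2]
print(paddle.argmin(x, axis=1).numpy())  # [0 1]
```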