diff --git a/python/paddle/fft.py b/python/paddle/fft.py
index 22525e3c620a32dcf831158f7ad4127c22861fd9..74b3bb23fc683e6c5616fed085e0c4280e5a1ad0 100644
--- a/python/paddle/fft.py
+++ b/python/paddle/fft.py
@@ -1440,12 +1440,12 @@ def fft_c2c(x, n, axis, norm, forward, name):
         _check_fft_n(n)
         s = [n]
         x = _resize_fft_input(x, s, axes)
-    op_type = 'fft_c2c'
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
 
     if in_dygraph_mode():
         out = _C_ops.fft_c2c(x, axes, norm, forward)
     else:
+        op_type = 'fft_c2c'
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
         inputs = {
             'X': [x],
         }
@@ -1472,12 +1472,13 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name):
         _check_fft_n(n)
         s = [n]
         x = _resize_fft_input(x, s, axes)
-    op_type = 'fft_r2c'
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], op_type)
-
     if in_dygraph_mode():
         out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
     else:
+        op_type = 'fft_r2c'
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], op_type
+        )
         inputs = {
             'X': [x],
         }
@@ -1513,8 +1514,6 @@ def fft_c2r(x, n, axis, norm, forward, name):
         _check_fft_n(n)
         s = [n // 2 + 1]
         x = _resize_fft_input(x, s, axes)
-    op_type = 'fft_c2r'
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
 
     if in_dygraph_mode():
         if n is not None:
@@ -1522,6 +1521,8 @@
         else:
             out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
     else:
+        op_type = 'fft_c2r'
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
         inputs = {
             'X': [x],
         }
@@ -1572,12 +1573,12 @@ def fftn_c2c(x, s, axes, norm, forward, name):
 
     if s is not None:
         x = _resize_fft_input(x, s, axes)
-    op_type = 'fft_c2c'
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
 
     if in_dygraph_mode():
         out = _C_ops.fft_c2c(x, axes, norm, forward)
     else:
+        op_type = 'fft_c2c'
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
         inputs = {
             'X': [x],
         }
@@ -1623,12 +1624,13 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name):
 
     if s is not None:
         x = _resize_fft_input(x, s, axes)
-    op_type = 'fft_r2c'
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], op_type)
-
     if in_dygraph_mode():
         out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
     else:
+        op_type = 'fft_r2c'
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], op_type
+        )
         inputs = {
             'X': [x],
         }
@@ -1686,15 +1688,14 @@ def fftn_c2r(x, s, axes, norm, forward, name):
 
         fft_input_shape[-1] = fft_input_shape[-1] // 2 + 1
         x = _resize_fft_input(x, fft_input_shape, axes)
-    op_type = 'fft_c2r'
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
-
     if in_dygraph_mode():
         if s is not None:
             out = _C_ops.fft_c2r(x, axes, norm, forward, s[-1])
         else:
             out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
     else:
+        op_type = 'fft_c2r'
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
         inputs = {
             'X': [x],
         }
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 08c352a3f0c40dc7d0a08f424f25d92c2b5b442e..1b86155090b52d119a9725920beb6b35c33bfd62 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -530,16 +530,6 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
 
           data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
     """
-    attrs = {'force_cpu': force_cpu}
-    dtype = convert_dtype(dtype)
-    if not isinstance(value, Variable):
-        if dtype in ['uint8', 'int16', 'int32', 'int64']:
-            attrs['str_value'] = str(int(value))
-            attrs['value'] = int(value)
-        else:
-            attrs['str_value'] = str(float(value))
-            attrs['value'] = float(value)
-
     if in_dygraph_mode():
         place = _current_expected_place()
         if force_cpu:
@@ -561,6 +551,16 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
         out.stop_gradient = True
         return out
     else:
+        attrs = {'force_cpu': force_cpu}
+        dtype = convert_dtype(dtype)
+        if not isinstance(value, Variable):
+            if dtype in ['uint8', 'int16', 'int32', 'int64']:
+                attrs['str_value'] = str(int(value))
+                attrs['value'] = int(value)
+            else:
+                attrs['str_value'] = str(float(value))
+                attrs['value'] = float(value)
+
         helper = LayerHelper("fill_constant", **locals())
         inputs = {}
         if isinstance(value, Variable):
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 2466fa10260c03a3942e13b9a4b29f5dfc4e7e97..e5d264106e450018486ceb7804c222a0345c5a07 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -1026,8 +1026,8 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     _check_attr(num_rows, "num_rows")
 
     if dtype is None:
-        dtype = 'float32'
-    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = core.VarDesc.VarType.FP32
+    elif not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
     if num_columns is not None:
         _check_attr(num_columns, "num_columns")
@@ -1181,14 +1181,6 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
         end = start
         start = 0
 
-    out_shape = None
-    if (
-        not isinstance(start, Variable)
-        and not isinstance(end, Variable)
-        and not isinstance(step, Variable)
-    ):
-        out_shape = [int(math.ceil((end - start) / step))]
-
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
 
@@ -1220,6 +1212,13 @@
             'range/arange',
         )
         helper = LayerHelper('range', **locals())
+        out_shape = None
+        if (
+            not isinstance(start, Variable)
+            and not isinstance(end, Variable)
+            and not isinstance(step, Variable)
+        ):
+            out_shape = [int(math.ceil((end - start) / step))]
         out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
         helper.append_op(
            type='range',
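
For orientation, here is a minimal sketch of the pattern the patch applies to each API: the dygraph branch dispatches straight to the C++ op and relies on kernel-side validation, while `check_variable_and_dtype` and the op-description bookkeeping stay on the static-graph branch. The function name `fft_c2c_sketch`, the use of `x.dtype` for the output variable, and the `attrs`/`outputs` dictionaries below are illustrative assumptions; the actual static-graph construction is the one already in `python/paddle/fft.py`.

# Minimal sketch of the branch-local validation pattern used throughout this
# patch; names and static-graph details here are assumptions for illustration,
# not the patched code itself.
from paddle import _C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper


def fft_c2c_sketch(x, axes, norm, forward, name=None):
    if in_dygraph_mode():
        # Dygraph: no Python-level dtype check; the kernel validates its inputs.
        return _C_ops.fft_c2c(x, axes, norm, forward)
    else:
        # Static graph: validation now lives next to the op construction.
        op_type = 'fft_c2c'
        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
        helper = LayerHelper(op_type, **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type=op_type,
            inputs={'X': [x]},
            outputs={'Out': [out]},
            attrs={'axes': axes, 'normalization': norm, 'forward': forward},
        )
        return out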