Unverified commit 861fef52, authored by wanghuancoder, committed by GitHub

delete legacy dygraph code in python/paddle/tensor (#49286)

* delete _in_legacy_dygraph
Parent ea741aff
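Every hunk below applies the same transformation: the old three-way dispatch (new dygraph / legacy dygraph / static graph) collapses into a plain if/else between eager mode and static graph. A minimal sketch of the before/after shape, where `op` is a placeholder operator name, not a real API:

    # Before: three execution paths.
    def f(x):
        if in_dygraph_mode():            # eager mode
            return _C_ops.op(x)
        if _in_legacy_dygraph():         # legacy dygraph -- deleted by this commit
            return _legacy_C_ops.op(x)
        helper = LayerHelper('op', **locals())   # static graph
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='op', inputs={"X": x}, outputs={"Out": out})
        return out

    # After: eager or static, nothing in between.
    def f(x):
        if in_dygraph_mode():
            return _C_ops.op(x)
        else:
            helper = LayerHelper('op', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(type='op', inputs={"X": x}, outputs={"Out": out})
            return out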
@@ -255,8 +255,7 @@ def _test_eager_guard(place=None):
     try:
         yield
     finally:
-        if not already_fallback:
-            _enable_legacy_dygraph()
+        pass
 
 
 global_ipu_index = -1
......
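With the legacy fallback deleted, `_test_eager_guard` degenerates to a guard that does nothing on exit. A sketch of the resulting function, assuming the `signature_safe_contextmanager` decorator used elsewhere in the file:

    @signature_safe_contextmanager
    def _test_eager_guard(place=None):
        # Eager mode is the only dynamic-graph mode left, so there is
        # no legacy state to restore when the guarded block exits.
        try:
            yield
        finally:
            pass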
@@ -28,7 +28,9 @@ class TestUniqueOp(OpTest):
         self.init_config()
 
     def test_check_output(self):
+        paddle.enable_static()
         self.check_output()
+        paddle.disable_static()
 
     def init_config(self):
         self.inputs = {
@@ -72,6 +74,8 @@ class TestRandom(TestUniqueOp):
 
 
 class TestUniqueRaiseError(unittest.TestCase):
     def test_errors(self):
+        paddle.enable_static()
+
         def test_type():
             paddle.unique([10])
@@ -82,6 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
             paddle.unique(data)
 
         self.assertRaises(TypeError, test_dtype)
+        paddle.disable_static()
 
 
 @unittest.skipIf(
@@ -100,8 +105,10 @@ class TestOneGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()
 
 
 @unittest.skipIf(
@@ -125,8 +132,10 @@ class TestRandomGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()
 
 
 class TestSortedUniqueOp(TestUniqueOp):
@@ -209,16 +218,13 @@ class TestUniqueOpAxis1(TestUniqueOp):
 class TestUniqueAPI(unittest.TestCase):
     def test_dygraph_api_out(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out = paddle.unique(x)
         expected_out = np.unique(x_data)
         self.assertTrue((out.numpy() == expected_out).all(), True)
-        paddle.enable_static()
 
     def test_dygraph_api_attr(self):
-        paddle.disable_static()
         x_data = np.random.random((3, 5, 5)).astype("float32")
         x = paddle.to_tensor(x_data)
         out, index, inverse, counts = paddle.unique(
@@ -239,10 +245,8 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((index.numpy() == np_index).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()
 
     def test_dygraph_attr_dtype(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out, indices, inverse, counts = paddle.unique(
@@ -259,9 +263,9 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((indices.numpy() == np_indices).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()
 
     def test_static_graph(self):
+        paddle.enable_static()
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
@@ -281,6 +285,7 @@ class TestUniqueAPI(unittest.TestCase):
             np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
             np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
             np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
+        paddle.disable_static()
 
 
 class TestUniqueError(unittest.TestCase):
@@ -295,6 +300,7 @@ class TestUniqueError(unittest.TestCase):
         self.assertRaises(TypeError, test_x_dtype)
 
     def test_attr(self):
+        paddle.enable_static()
         x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
 
         def test_return_index():
@@ -319,6 +325,7 @@ class TestUniqueError(unittest.TestCase):
             result = paddle.unique(x, dtype='float64')
 
         self.assertRaises(TypeError, test_axis)
+        paddle.disable_static()
 
 
 if __name__ == "__main__":
......
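The rule behind the test edits: dygraph is now the process-wide default, so static-graph-only checks are bracketed by `paddle.enable_static()` / `paddle.disable_static()`, and the old per-test `disable_static()` calls in dygraph tests become redundant. A hypothetical test body showing the bracketing (illustrative, not taken from the diff):

    import numpy as np
    import paddle

    paddle.enable_static()  # OpTest-style checks need static mode
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[4], dtype='float32')
        y = paddle.unique(x)
        exe = paddle.static.Executor()
        (res,) = exe.run(
            feed={'x': np.array([3.0, 1.0, 3.0, 2.0], dtype='float32')},
            fetch_list=[y],
        )
    paddle.disable_static()  # restore the dygraph default for later tests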
@@ -15,7 +15,7 @@
 # Define functions about array.
 
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..framework import LayerHelper, _non_static_mode, core
+from ..framework import LayerHelper, core, in_dygraph_mode
 from ..static import Variable
 
 __all__ = []
@@ -45,27 +45,29 @@ def array_length(array):
             arr_len = paddle.tensor.array_length(arr)
             print(arr_len) # 1
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_write must be a list in dygraph mode"
         return len(array)
-
-    if (
-        not isinstance(array, Variable)
-        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
-    ):
-        raise TypeError(
-            "array should be tensor array vairable in array_length Op"
-        )
-
-    helper = LayerHelper('array_length', **locals())
-    tmp = helper.create_variable_for_type_inference(dtype='int64')
-    tmp.stop_gradient = True
-    helper.append_op(
-        type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}
-    )
-    return tmp
+    else:
+        if (
+            not isinstance(array, Variable)
+            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
+        ):
+            raise TypeError(
+                "array should be tensor array vairable in array_length Op"
+            )
+
+        helper = LayerHelper('array_length', **locals())
+        tmp = helper.create_variable_for_type_inference(dtype='int64')
+        tmp.stop_gradient = True
+        helper.append_op(
+            type='lod_array_length',
+            inputs={'X': [array]},
+            outputs={'Out': [tmp]},
+        )
+        return tmp
def array_read(array, i):
@@ -107,7 +109,7 @@ def array_read(array, i):
         item = paddle.tensor.array_read(arr, i)
         print(item) # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_read must be list in dygraph mode"
@@ -119,21 +121,21 @@ def array_read(array, i):
         ], "The shape of index 'i' should be [1] in dygraph mode"
         i = i.numpy().item(0)
         return array[i]
-
-    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
-    helper = LayerHelper('array_read', **locals())
-    if (
-        not isinstance(array, Variable)
-        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
-    ):
-        raise TypeError("array should be tensor array vairable")
-    out = helper.create_variable_for_type_inference(dtype=array.dtype)
-    helper.append_op(
-        type='read_from_array',
-        inputs={'X': [array], 'I': [i]},
-        outputs={'Out': [out]},
-    )
-    return out
+    else:
+        check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
+        helper = LayerHelper('array_read', **locals())
+        if (
+            not isinstance(array, Variable)
+            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
+        ):
+            raise TypeError("array should be tensor array vairable")
+        out = helper.create_variable_for_type_inference(dtype=array.dtype)
+        helper.append_op(
+            type='read_from_array',
+            inputs={'X': [array], 'I': [i]},
+            outputs={'Out': [out]},
+        )
+        return out
def array_write(x, i, array=None):
@@ -167,7 +169,7 @@ def array_write(x, i, array=None):
         item = paddle.tensor.array_read(arr, i)
         print(item) # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             x, Variable
         ), "The input data 'x' in array_write must be Variable in dygraph mode"
@@ -191,30 +193,30 @@ def array_write(x, i, array=None):
         else:
             array.append(x)
         return array
-
-    check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
-    check_type(x, 'x', (Variable), 'array_write')
-    helper = LayerHelper('array_write', **locals())
-    if array is not None:
-        if (
-            not isinstance(array, Variable)
-            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
-        ):
-            raise TypeError(
-                "array should be tensor array vairable in array_write Op"
-            )
-    if array is None:
-        array = helper.create_variable(
-            name="{0}.out".format(helper.name),
-            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
-            dtype=x.dtype,
-        )
-    helper.append_op(
-        type='write_to_array',
-        inputs={'X': [x], 'I': [i]},
-        outputs={'Out': [array]},
-    )
-    return array
+    else:
+        check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
+        check_type(x, 'x', (Variable), 'array_write')
+        helper = LayerHelper('array_write', **locals())
+        if array is not None:
+            if (
+                not isinstance(array, Variable)
+                or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
+            ):
+                raise TypeError(
+                    "array should be tensor array vairable in array_write Op"
+                )
+        if array is None:
+            array = helper.create_variable(
+                name="{0}.out".format(helper.name),
+                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
+                dtype=x.dtype,
+            )
+        helper.append_op(
+            type='write_to_array',
+            inputs={'X': [x], 'I': [i]},
+            outputs={'Out': [array]},
+        )
+        return array
def create_array(dtype, initialized_list=None):
@@ -265,17 +267,17 @@ def create_array(dtype, initialized_list=None):
             )
         )
 
-    if _non_static_mode():
+    if in_dygraph_mode():
         return array
-
-    helper = LayerHelper("array", **locals())
-    tensor_array = helper.create_variable(
-        name="{0}.out".format(helper.name),
-        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
-        dtype=dtype,
-    )
-
-    for val in array:
-        array_write(x=val, i=array_length(tensor_array), array=tensor_array)
-
-    return tensor_array
+    else:
+        helper = LayerHelper("array", **locals())
+        tensor_array = helper.create_variable(
+            name="{0}.out".format(helper.name),
+            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
+            dtype=dtype,
+        )
+
+        for val in array:
+            array_write(x=val, i=array_length(tensor_array), array=tensor_array)
+
+        return tensor_array
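The docstrings above already pin the expected dygraph behavior (a TensorArray is a plain Python list). A usage sketch consistent with them:

    import paddle

    arr = paddle.tensor.create_array(dtype='float32')   # [] in dygraph
    x = paddle.full(shape=[1, 3], fill_value=5, dtype='float32')
    i = paddle.zeros(shape=[1], dtype='int64')
    arr = paddle.tensor.array_write(x, i, array=arr)
    item = paddle.tensor.array_read(arr, i)             # [[5., 5., 5.]]
    n = paddle.tensor.array_length(arr)                 # 1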
@@ -17,10 +17,10 @@
 import numpy as np
 
 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper, core
 from ..static import Variable
 from .creation import _complex_to_real_dtype, assign
@@ -107,36 +107,32 @@ def shape(input):
         out = _C_ops.shape(input)
         out.stop_gradient = True
         return out
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.shape(input)
-        out.stop_gradient = True
-        return out
-
-    check_variable_and_dtype(
-        input,
-        'input',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int32',
-            'int64',
-            'complex64',
-            'complex128',
-        ],
-        'shape',
-    )
-    helper = LayerHelper('shape', **locals())
-    out = helper.create_variable_for_type_inference(dtype='int32')
-    helper.append_op(
-        type='shape',
-        inputs={'Input': input},
-        outputs={'Out': out},
-        stop_gradient=True,
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            input,
+            'input',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int32',
+                'int64',
+                'complex64',
+                'complex128',
+            ],
+            'shape',
+        )
+        helper = LayerHelper('shape', **locals())
+        out = helper.create_variable_for_type_inference(dtype='int32')
+        helper.append_op(
+            type='shape',
+            inputs={'Input': input},
+            outputs={'Out': out},
+            stop_gradient=True,
+        )
+        return out
def is_complex(x):
@@ -289,16 +285,14 @@ def real(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.real(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.real(x)
-
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
-    helper = LayerHelper('real', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=_complex_to_real_dtype(helper.input_dtype())
-    )
-    helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
+        helper = LayerHelper('real', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=_complex_to_real_dtype(helper.input_dtype())
+        )
+        helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
+        return out
def imag(x, name=None):
@@ -336,13 +330,11 @@ def imag(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.imag(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.imag(x)
-
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
-    helper = LayerHelper('imag', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=_complex_to_real_dtype(helper.input_dtype())
-    )
-    helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
+        helper = LayerHelper('imag', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=_complex_to_real_dtype(helper.input_dtype())
+        )
+        helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
+        return out
This diff is collapsed.
@@ -20,10 +20,10 @@ import string
 import numpy as np
 import opt_einsum
 
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from .linalg import matmul, transpose
 from .manipulation import reshape, squeeze, unsqueeze
@@ -829,38 +829,35 @@ def gen_einsum_op(equation, *operands):
     """
     EinsumOp Python Interface:
     """
-    assert len(operands) <= 2, "Only support two operands in EinsumOp."
     if in_dygraph_mode():
         return _C_ops.einsum(operands, equation)[0]
-
-    if _in_legacy_dygraph():
-        # dygraph
-        return _legacy_C_ops.einsum(
-            operands, len(operands), len(operands), 'equation', equation
-        )[0]
-
-    for inp in operands:
-        check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
-    check_type(equation, 'equation', str, 'einsum')
-    helper = LayerHelper('einsum', **locals())
-    out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
-    attrs = dict()
-    attrs['equation'] = equation
-    caches = [
-        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
-        for i in range(len(operands))
-    ]
-    xshape = [
-        helper.create_variable_for_type_inference(dtype=operands[0].dtype)
-        for i in range(len(operands))
-    ]
-    helper.append_op(
-        type='einsum',
-        inputs={'Operands': operands},
-        outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
-        attrs=attrs,
-    )
-    return out
+    else:
+        assert len(operands) <= 2, "Only support two operands in EinsumOp."
+        for inp in operands:
+            check_variable_and_dtype(
+                inp, 'dtype', ['float32', 'float64'], 'einsum'
+            )
+        check_type(equation, 'equation', str, 'einsum')
+        helper = LayerHelper('einsum', **locals())
+        out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
+        attrs = dict()
+        attrs['equation'] = equation
+        caches = [
+            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
+            for i in range(len(operands))
+        ]
+        xshape = [
+            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
+            for i in range(len(operands))
+        ]
+        helper.append_op(
+            type='einsum',
+            inputs={'Operands': operands},
+            outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
+            attrs=attrs,
+        )
+        return out


def einsum(equation, *operands):
......
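Note the moved assertion: `len(operands) <= 2` now guards only the static-graph branch, while the eager branch hands the operand list straight to `_C_ops.einsum`. Calling code is unaffected; for example:

    import paddle

    a = paddle.rand([3, 4])
    b = paddle.rand([4, 5])
    c = paddle.einsum('ij,jk->ik', a, b)   # matrix multiply, shape [3, 5]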
@@ -24,7 +24,6 @@ from ..fluid.proto import framework_pb2
 from ..framework import (
     LayerHelper,
     OpProtoHolder,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
@@ -274,41 +273,44 @@ def generate_activation_fn(op_type):
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)
 
     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, op_type):
-            op = getattr(_C_ops, op_type)
-            return op(x)
-        # TODO(dev): Because some ops' yaml has not been migrated.
-        # Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, op_type)
-            return op(x)
-
-        if op_type not in ["abs", "exp", "square"]:
-            check_variable_and_dtype(
-                x, 'x', ['float16', 'float32', 'float64'], op_type
-            )
+        if in_dygraph_mode():
+            if hasattr(_C_ops, op_type):
+                op = getattr(_C_ops, op_type)
+                return op(x)
+            else:
+                # TODO(dev): Because some ops' yaml has not been migrated.
+                # Replace it with _C_ops while all yaml work is done.
+                op = getattr(_legacy_C_ops, op_type)
+                return op(x)
         else:
-            # abs exp square ops support dtype(int32, int64, float16, float32, float64)
-            check_variable_and_dtype(
-                x,
-                'x',
-                [
-                    'int32',
-                    'int64',
-                    'float16',
-                    'float32',
-                    'float64',
-                    'complex64',
-                    'complex128',
-                ],
-                op_type,
-            )
-
-        helper = LayerHelper(op_type, **locals())
-        output = helper.create_variable_for_type_inference(dtype=x.dtype)
-        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
-        return output
+            if op_type not in ["abs", "exp", "square"]:
+                check_variable_and_dtype(
+                    x, 'x', ['float16', 'float32', 'float64'], op_type
+                )
+            else:
+                # abs exp square ops support dtype(int32, int64, float16, float32, float64)
+                check_variable_and_dtype(
+                    x,
+                    'x',
+                    [
+                        'int32',
+                        'int64',
+                        'float16',
+                        'float32',
+                        'float64',
+                        'complex64',
+                        'complex128',
+                    ],
+                    op_type,
+                )
+            helper = LayerHelper(op_type, **locals())
+            output = helper.create_variable_for_type_inference(dtype=x.dtype)
+            helper.append_op(
+                type=op_type, inputs={"X": x}, outputs={"Out": output}
+            )
+            return output
 
     func.__name__ = op_type
     func.__doc__ = _generate_doc_string_(
@@ -332,18 +334,20 @@ def generate_inplace_fn(inplace_op_type):
     origin_op_type = inplace_op_type[:-1]
 
     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, inplace_op_type):
-            op = getattr(_C_ops, inplace_op_type)
-            return op(x)
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, inplace_op_type)
-            return op(x)
-        warnings.warn(
-            "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
-                inplace_op_type, origin_op_type
+        if in_dygraph_mode():
+            if hasattr(_C_ops, inplace_op_type):
+                op = getattr(_C_ops, inplace_op_type)
+                return op(x)
+            else:
+                op = getattr(_legacy_C_ops, inplace_op_type)
+                return op(x)
+        else:
+            warnings.warn(
+                "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
+                    inplace_op_type, origin_op_type
+                )
             )
-        )
-        return generate_activation_fn(origin_op_type)(x, name)
+            return generate_activation_fn(origin_op_type)(x, name)
 
     func.__name__ = inplace_op_type
     func.__doc__ = """
......
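The generated functions keep a `_legacy_C_ops` fallback inside the eager branch (per the TODO, some ops' yaml signatures are not migrated yet), and the in-place variants still warn and defer to the out-of-place op in static mode. Observable behavior, assuming `exp_` is one of the generated in-place ops:

    import paddle

    x = paddle.to_tensor([0.0, 1.0])
    x.exp_()            # dygraph: mutates x in place
    print(x.numpy())    # [1.        2.7182817]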
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .. import _C_ops, _legacy_C_ops
+from .. import _C_ops
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper
 from .layer_function_generator import (
     add_sample_code,
@@ -218,14 +218,14 @@ def acos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acos')
-    helper = LayerHelper('acos', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acos'
+        )
+        helper = LayerHelper('acos', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
+        return out


def acosh(x, name=None):
@@ -255,14 +255,14 @@ def acosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acosh')
-    helper = LayerHelper('acosh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acosh'
+        )
+        helper = LayerHelper('acosh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
+        return out


def asin(x, name=None):
@@ -292,14 +292,14 @@ def asin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asin')
-    helper = LayerHelper('asin', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asin'
+        )
+        helper = LayerHelper('asin', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
+        return out


def asinh(x, name=None):
@@ -329,14 +329,14 @@ def asinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asinh')
-    helper = LayerHelper('asinh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asinh'
+        )
+        helper = LayerHelper('asinh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
+        return out


def atan(x, name=None):
@@ -366,14 +366,14 @@ def atan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atan')
-    helper = LayerHelper('atan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+        )
+        helper = LayerHelper('atan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
+        return out


def atanh(x, name=None):
@@ -403,14 +403,14 @@ def atanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atanh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atanh')
-    helper = LayerHelper('atanh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+        )
+        helper = LayerHelper('atanh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
+        return out


def ceil(x, name=None):
@@ -441,14 +441,14 @@ def ceil(x, name=None):
     """
     if in_dygraph_mode():
        return _C_ops.ceil(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.ceil(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
-    helper = LayerHelper('ceil', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+        )
+        helper = LayerHelper('ceil', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
+        return out


def cos(x, name=None):
@@ -480,14 +480,14 @@ def cos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cos')
-    helper = LayerHelper('cos', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cos'
+        )
+        helper = LayerHelper('cos', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
+        return out


def cosh(x, name=None):
@@ -519,14 +519,14 @@ def cosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cosh')
-    helper = LayerHelper('cosh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+        )
+        helper = LayerHelper('cosh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
+        return out


def exp(x, name=None):
@@ -557,27 +557,25 @@ def exp(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exp(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.exp(x)
-
-    check_variable_and_dtype(
-        x,
-        'x',
-        [
-            'int32',
-            'int64',
-            'float16',
-            'float32',
-            'float64',
-            'complex64',
-            'complex128',
-        ],
-        'exp',
-    )
-    helper = LayerHelper('exp', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'int32',
+                'int64',
+                'float16',
+                'float32',
+                'float64',
+                'complex64',
+                'complex128',
+            ],
+            'exp',
+        )
+        helper = LayerHelper('exp', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
+        return out


def expm1(x, name=None):
@@ -608,14 +606,14 @@ def expm1(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expm1(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expm1(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
-    helper = LayerHelper('expm1', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+        )
+        helper = LayerHelper('expm1', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
+        return out


def floor(x, name=None):
@@ -646,14 +644,14 @@ def floor(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.floor(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.floor(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
-    helper = LayerHelper('floor', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+        )
+        helper = LayerHelper('floor', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
+        return out


def reciprocal(x, name=None):
@@ -684,16 +682,16 @@ def reciprocal(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.reciprocal(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reciprocal(x)
-
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
-    )
-    helper = LayerHelper('reciprocal', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='reciprocal', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
+        )
+        helper = LayerHelper('reciprocal', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
+        )
+        return out


def round(x, name=None):
@@ -731,14 +729,14 @@ def round(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.round(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.round(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
-    helper = LayerHelper('round', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'round'
+        )
+        helper = LayerHelper('round', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
+        return out


def rsqrt(x, name=None):
@@ -770,14 +768,14 @@ def rsqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.rsqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.rsqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'rsqrt')
-    helper = LayerHelper('rsqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
+        )
+        helper = LayerHelper('rsqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


def sigmoid(x, name=None):
@@ -808,16 +806,14 @@ def sigmoid(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sigmoid(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sigmoid(x)
-
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
-    )
-    helper = LayerHelper('sigmoid', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
+        )
+        helper = LayerHelper('sigmoid', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
+        return out


def sin(x, name=None):
@@ -847,14 +843,14 @@ def sin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin')
-    helper = LayerHelper('sin', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sin'
+        )
+        helper = LayerHelper('sin', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
+        return out


def sinh(x, name=None):
@@ -884,14 +880,14 @@ def sinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sinh')
-    helper = LayerHelper('sinh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sinh'
+        )
+        helper = LayerHelper('sinh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
+        return out


def sqrt(x, name=None):
@@ -920,14 +916,14 @@ def sqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sqrt')
-    helper = LayerHelper('sqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
+        )
+        helper = LayerHelper('sqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


def square(x, name=None):
@@ -956,27 +952,25 @@ def square(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.square(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.square(x)
-
-    check_variable_and_dtype(
-        x,
-        'x',
-        [
-            'int32',
-            'int64',
-            'float16',
-            'float32',
-            'float64',
-            'complex64',
-            'complex128',
-        ],
-        'square',
-    )
-    helper = LayerHelper('square', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'int32',
+                'int64',
+                'float16',
+                'float32',
+                'float64',
+                'complex64',
+                'complex128',
+            ],
+            'square',
+        )
+        helper = LayerHelper('square', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
+        return out


def tan(x, name=None):
@@ -1008,14 +1002,14 @@ def tan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tan')
-    helper = LayerHelper('tan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'tan'
+        )
+        helper = LayerHelper('tan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
+        return out


_erf_ = generate_layer_fn('erf')
......
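Every hunk in this file is the same mechanical rewrite of a unary op. If the repeated else branch were factored out, it would look roughly like this (a hypothetical helper, not part of the commit):

    def _static_unary(op_type, x, dtypes=('float16', 'float32', 'float64')):
        # Shared static-graph path of acos/acosh/.../tan after this commit.
        check_variable_and_dtype(x, 'x', list(dtypes), op_type)
        helper = LayerHelper(op_type, **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": out})
        return out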
This diff is collapsed.
This diff is collapsed.
@@ -16,7 +16,7 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode
 
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..framework import LayerHelper, core
@@ -81,39 +81,37 @@ def mean(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mean(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_mean(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    check_variable_and_dtype(
-        x,
-        'x/input',
-        ['uint16', 'float16', 'float32', 'float64'],
-        'mean/reduce_mean',
-    )
-    check_type(
-        axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean'
-    )
-    if isinstance(axis, (list, tuple)):
-        for item in axis:
-            check_type(
-                item,
-                'elements of axis/dim',
-                (int, Variable),
-                'mean/reduce_mean',
-            )
-
-    helper = LayerHelper('mean', **locals())
-    attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
+        check_variable_and_dtype(
+            x,
+            'x/input',
+            ['uint16', 'float16', 'float32', 'float64'],
+            'mean/reduce_mean',
+        )
+        check_type(
+            axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean'
+        )
+        if isinstance(axis, (list, tuple)):
+            for item in axis:
+                check_type(
+                    item,
+                    'elements of axis/dim',
+                    (int, Variable),
+                    'mean/reduce_mean',
+                )
+
+        helper = LayerHelper('mean', **locals())
+        attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='reduce_mean',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
         out2 = paddle.var(x, axis=1)
         # [1. 4.33333333]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')
 
     u = mean(x, axis, True, name)
@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
         # [1. 2.081666]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')
 
     out = var(**locals())
@@ -243,17 +241,15 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.numel(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.size(x)
-
-    if not isinstance(x, Variable):
-        raise TypeError("x must be a Tensor in numel")
-    helper = LayerHelper('numel', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.INT64
-    )
-    helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
-    return out
+    else:
+        if not isinstance(x, Variable):
+            raise TypeError("x must be a Tensor in numel")
+        helper = LayerHelper('numel', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=core.VarDesc.VarType.INT64
+        )
+        helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
+        return out
def nanmedian(x, axis=None, keepdim=True, name=None):
@@ -331,27 +327,30 @@ def nanmedian(x, axis=None, keepdim=True, name=None):
         if len(axis) != len(set(axis)):
             raise ValueError("Axis has duplicated elements.")
 
-    if _in_legacy_dygraph():
+    if in_dygraph_mode():
         median_index, out = _legacy_C_ops.nanmedian(
             x, 'axis', axis, 'keepdim', keepdim
         )
         return out
-
-    check_variable_and_dtype(
-        x, 'X', ['int32', 'int64', 'float16', 'float32', 'float64'], 'nanmedian'
-    )
-
-    helper = LayerHelper('nanmedian', **locals())
-    attrs = {'axis': axis, 'keepdim': keepdim}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    medians = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='nanmedian',
-        inputs={'X': x},
-        outputs={'Out': out, 'MedianIndex': medians},
-        attrs=attrs,
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'X',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'nanmedian',
+        )
+
+        helper = LayerHelper('nanmedian', **locals())
+        attrs = {'axis': axis, 'keepdim': keepdim}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        medians = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='nanmedian',
+            inputs={'X': x},
+            outputs={'Out': out, 'MedianIndex': medians},
+            attrs=attrs,
+        )
+        return out
def median(x, axis=None, keepdim=False, name=None):
@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False):
         for q_num in q:
             if q_num < 0 or q_num > 1:
                 raise ValueError("q should be in range [0, 1]")
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             q_num = paddle.to_tensor(q_num, dtype='float64')
         if ignore_nan:
             indices.append(q_num * (valid_counts - 1))
......
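The stat APIs are call-compatible before and after; the docstring expectations quoted above still hold. For instance:

    import paddle

    x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
    paddle.var(x, axis=1)   # [1.        4.33333333], matching the docstring above
    paddle.numel(x)         # 6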