Unverified commit 861fef52, authored by wanghuancoder, committed by GitHub

delete legacy dygraph code in python/paddle/tensor (#49286)

* delete _in_legacy_dygraph
Parent ea741aff
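Review note: across every file below, this PR collapses the old three-branch dispatch (eager dygraph / legacy dygraph / static graph) into a plain two-branch if/else. A minimal, self-contained sketch of the before/after control flow; the stubs stand in for Paddle internals and `relu` stands in for any op touched here (this is not Paddle source):

```python
def in_dygraph_mode():      # stub: True when eager dygraph is active
    return True

def _in_legacy_dygraph():   # stub: True when the (removed) legacy dygraph is active
    return False

class _C_ops:               # stub for paddle._C_ops (new kernels)
    @staticmethod
    def relu(x):
        return max(x, 0.0)

class _legacy_C_ops:        # stub for the deleted paddle._legacy_C_ops path
    @staticmethod
    def relu(x):
        return max(x, 0.0)

def relu_old(x):
    # before: eager, then legacy dygraph, then fall through to static graph
    if in_dygraph_mode():
        return _C_ops.relu(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.relu(x)
    raise NotImplementedError("static-graph branch elided in this sketch")

def relu_new(x):
    # after: the legacy branch is gone, leaving a single if/else
    if in_dygraph_mode():
        return _C_ops.relu(x)
    else:
        raise NotImplementedError("static-graph branch elided in this sketch")

assert relu_new(-3.0) == relu_old(-3.0) == 0.0
```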
@@ -255,8 +255,7 @@ def _test_eager_guard(place=None):
     try:
         yield
     finally:
-        if not already_fallback:
-            _enable_legacy_dygraph()
+        pass

 global_ipu_index = -1
......
@@ -28,7 +28,9 @@ class TestUniqueOp(OpTest):
         self.init_config()

     def test_check_output(self):
+        paddle.enable_static()
         self.check_output()
+        paddle.disable_static()

     def init_config(self):
         self.inputs = {
@@ -72,6 +74,8 @@ class TestRandom(TestUniqueOp):
 class TestUniqueRaiseError(unittest.TestCase):
     def test_errors(self):
+        paddle.enable_static()

         def test_type():
             paddle.unique([10])
@@ -82,6 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
             paddle.unique(data)

         self.assertRaises(TypeError, test_dtype)
+        paddle.disable_static()

 @unittest.skipIf(
@@ -100,8 +105,10 @@ class TestOneGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()

 @unittest.skipIf(
@@ -125,8 +132,10 @@ class TestRandomGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()

 class TestSortedUniqueOp(TestUniqueOp):
@@ -209,16 +218,13 @@ class TestUniqueOpAxis1(TestUniqueOp):
 class TestUniqueAPI(unittest.TestCase):
     def test_dygraph_api_out(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out = paddle.unique(x)
         expected_out = np.unique(x_data)
         self.assertTrue((out.numpy() == expected_out).all(), True)
-        paddle.enable_static()

     def test_dygraph_api_attr(self):
-        paddle.disable_static()
         x_data = np.random.random((3, 5, 5)).astype("float32")
         x = paddle.to_tensor(x_data)
         out, index, inverse, counts = paddle.unique(
@@ -239,10 +245,8 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((index.numpy() == np_index).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_dygraph_attr_dtype(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out, indices, inverse, counts = paddle.unique(
@@ -259,9 +263,9 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((indices.numpy() == np_indices).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_static_graph(self):
+        paddle.enable_static()
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
@@ -281,6 +285,7 @@ class TestUniqueAPI(unittest.TestCase):
             np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
             np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
             np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
+        paddle.disable_static()

 class TestUniqueError(unittest.TestCase):
@@ -295,6 +300,7 @@ class TestUniqueError(unittest.TestCase):
         self.assertRaises(TypeError, test_x_dtype)

     def test_attr(self):
+        paddle.enable_static()
         x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')

         def test_return_index():
@@ -319,6 +325,7 @@ class TestUniqueError(unittest.TestCase):
             result = paddle.unique(x, dtype='float64')

         self.assertRaises(TypeError, test_axis)
+        paddle.disable_static()

 if __name__ == "__main__":
......
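Review note on the test edits above: with the process-wide legacy-dygraph fallback gone, each static-graph test now brackets itself with paddle.enable_static()/paddle.disable_static(). A runnable sketch of the same guard pattern, assuming a current paddle install (the helper name `check_unique_static` is ours, not from the test suite):

```python
import numpy as np
import paddle

def check_unique_static():
    paddle.enable_static()                      # enter static mode, as the tests now do
    try:
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name='x', shape=[30], dtype='int64')
            out = paddle.unique(x)
            exe = paddle.static.Executor(paddle.CPUPlace())
            x_np = np.random.randint(0, 10, (30,)).astype('int64')
            (result,) = exe.run(feed={'x': x_np}, fetch_list=[out])
        np.testing.assert_array_equal(result, np.unique(x_np))
    finally:
        paddle.disable_static()                 # restore dygraph for later tests

check_unique_static()
```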
@@ -15,7 +15,7 @@
 # Define functions about array.
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..framework import LayerHelper, _non_static_mode, core
+from ..framework import LayerHelper, core, in_dygraph_mode
 from ..static import Variable

 __all__ = []
@@ -45,12 +45,12 @@ def array_length(array):
             arr_len = paddle.tensor.array_length(arr)
             print(arr_len) # 1
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_write must be a list in dygraph mode"
         return len(array)
-
+    else:
         if (
             not isinstance(array, Variable)
             or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
@@ -63,7 +63,9 @@ def array_length(array):
         tmp = helper.create_variable_for_type_inference(dtype='int64')
         tmp.stop_gradient = True
         helper.append_op(
-            type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}
+            type='lod_array_length',
+            inputs={'X': [array]},
+            outputs={'Out': [tmp]},
         )
         return tmp
@@ -107,7 +109,7 @@ def array_read(array, i):
             item = paddle.tensor.array_read(arr, i)
             print(item) # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_read must be list in dygraph mode"
@@ -119,7 +121,7 @@ def array_read(array, i):
         ], "The shape of index 'i' should be [1] in dygraph mode"
         i = i.numpy().item(0)
         return array[i]
-
+    else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
         helper = LayerHelper('array_read', **locals())
         if (
@@ -167,7 +169,7 @@ def array_write(x, i, array=None):
             item = paddle.tensor.array_read(arr, i)
             print(item) # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             x, Variable
         ), "The input data 'x' in array_write must be Variable in dygraph mode"
@@ -191,7 +193,7 @@ def array_write(x, i, array=None):
         else:
             array.append(x)
         return array
-
+    else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
         check_type(x, 'x', (Variable), 'array_write')
         helper = LayerHelper('array_write', **locals())
@@ -265,9 +267,9 @@ def create_array(dtype, initialized_list=None):
             )
         )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return array
-
+    else:
         helper = LayerHelper("array", **locals())
         tensor_array = helper.create_variable(
             name="{0}.out".format(helper.name),
......
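Review note: after this change the array ops keep exactly two paths, a plain Python list under in_dygraph_mode() and a LOD_TENSOR_ARRAY built through LayerHelper under the new else:. Dygraph usage, adapted from the docstrings in this file (assumes paddle is installed):

```python
import paddle

arr = paddle.tensor.create_array(dtype='float32')   # in dygraph this is just a Python list
x = paddle.full(shape=[3], fill_value=5.0, dtype='float32')
i = paddle.zeros(shape=[1], dtype='int64')          # index must have shape [1] in dygraph
arr = paddle.tensor.array_write(x, i, array=arr)
print(paddle.tensor.array_length(arr))              # 1
print(paddle.tensor.array_read(arr, i))             # the tensor written at index 0
```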
@@ -17,10 +17,10 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper, core
 from ..static import Variable
 from .creation import _complex_to_real_dtype, assign
@@ -107,11 +107,7 @@ def shape(input):
         out = _C_ops.shape(input)
         out.stop_gradient = True
         return out
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.shape(input)
-        out.stop_gradient = True
-        return out
-
+    else:
         check_variable_and_dtype(
             input,
             'input',
@@ -289,9 +285,7 @@ def real(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.real(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.real(x)
-
+    else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
         helper = LayerHelper('real', **locals())
         out = helper.create_variable_for_type_inference(
@@ -336,9 +330,7 @@ def imag(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.imag(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.imag(x)
-
+    else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
         helper = LayerHelper('imag', **locals())
         out = helper.create_variable_for_type_inference(
......
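Review note: real() and imag() keep their eager fast path; only the _legacy_C_ops branch is removed, so public behavior should be unchanged. A quick dygraph check (assumes paddle is installed and complex tensor support as in paddle 2.x):

```python
import paddle

z = paddle.to_tensor([1 + 2j, 3 - 4j], dtype='complex64')
print(paddle.real(z))  # [1., 3.]
print(paddle.imag(z))  # [2., -4.]
```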
@@ -33,7 +33,6 @@ from ..fluid.data_feeder import (
 from ..fluid.framework import (
     Variable,
     _in_eager_without_dygraph_check,
-    _in_legacy_dygraph,
     device_guard,
 )
 from ..fluid.initializer import Constant, Initializer
@@ -43,7 +42,6 @@ from ..framework import (
     LayerHelper,
     _current_expected_place,
     _get_paddle_place,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
@@ -324,11 +322,7 @@ def linspace(start, stop, num, dtype=None, name=None):
             dtype,
             _current_expected_place(),
         )
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.linspace(
-            tensor_start, tensor_stop, tensor_num, 'dtype', dtype
-        )
-
+    else:
         helper = LayerHelper("linspace", **locals())
         start_dtype = convert_dtype(tensor_start.dtype)
@@ -376,7 +370,11 @@ def linspace(start, stop, num, dtype=None, name=None):
         helper.append_op(
             type='linspace',
-            inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num},
+            inputs={
+                'Start': tensor_start,
+                'Stop': tensor_stop,
+                'Num': tensor_num,
+            },
             attrs={'dtype': dtype},
             outputs={'Out': [out]},
         )
@@ -446,11 +444,11 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
     if not isinstance(base, Variable):
         with device_guard("cpu"):
             tensor_base = fill_constant([1], dtype, base)
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.logspace(
             tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype
         )
+    else:
         helper = LayerHelper("logspace", **locals())
         start_dtype = convert_dtype(tensor_start.dtype)
@@ -746,7 +744,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
     if place is None:
         place = _current_expected_place()
-    if _non_static_mode():
+    if paddle.fluid.framework._non_static_mode():
         return _to_tensor_non_static(data, dtype, place, stop_gradient)

     # call assign for static graph
@@ -785,32 +783,41 @@ def full_like(x, fill_value, dtype=None, name=None):
           # [[2. 2. 2.]
           #  [2. 2. 2.]]
     """
     if dtype is None:
         dtype = x.dtype
     else:
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)

     if in_dygraph_mode():
         return _C_ops.full_like(x, fill_value, dtype, x.place)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.fill_any_like(
-            x, 'value', fill_value, 'dtype', dtype
-        )
-
+    else:
         helper = LayerHelper("full_like", **locals())
         check_variable_and_dtype(
             x,
             'x',
-            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+            ],
             'full_like',
         )
         check_dtype(
             dtype,
             'dtype',
-            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+            ],
             'full_like/zeros_like/ones_like',
         )
         out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -1011,7 +1018,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     """

     def _check_attr(attr, message):
-        if isinstance(attr, ((Variable, core.VarBase, core.eager.Tensor))):
+        if isinstance(attr, ((Variable, core.eager.Tensor))):
            assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
        elif not isinstance(attr, int) or attr < 0:
            raise TypeError("{} should be a non-negative int.".format(message))
@@ -1027,16 +1034,10 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     else:
         num_columns = num_rows

-    if _non_static_mode():
-        if in_dygraph_mode():
-            out = _C_ops.eye(
-                num_rows, num_columns, dtype, _current_expected_place()
-            )
-        elif _in_legacy_dygraph():
-            out = _legacy_C_ops.eye(
-                'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
-            )
+    if in_dygraph_mode():
+        out = _C_ops.eye(
+            num_rows, num_columns, dtype, _current_expected_place()
+        )
     else:
         helper = LayerHelper("eye", **locals())
         check_dtype(
@@ -1211,14 +1212,12 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
     if in_dygraph_mode():
         return _C_ops.arange(start, end, step, dtype, _current_expected_place())
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.range(start, end, step)
-        out.stop_gradient = True
-        return out
-
+    else:
         check_dtype(
-            dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'range/arange'
+            dtype,
+            'dtype',
+            ['float32', 'float64', 'int32', 'int64'],
+            'range/arange',
         )
         helper = LayerHelper('range', **locals())
         out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
@@ -1328,11 +1327,7 @@ def tril(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tril(x, diagonal, True)
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", True)
-
+    else:
         return _tril_triu_op(LayerHelper('tril', **locals()))
@@ -1394,11 +1389,7 @@ def triu(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.triu(x, diagonal, False)
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", False)
-
+    else:
         return _tril_triu_op(LayerHelper('triu', **locals()))
@@ -1437,18 +1428,16 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if _in_legacy_dygraph():
-        num = len(args)
-        out = _legacy_C_ops.meshgrid(list(args), num)
-        return out
     if in_dygraph_mode():
         return _C_ops.meshgrid(list(args))
-
+    else:
         name = kwargs.get("name", None)
         helper = LayerHelper('meshgrid', **locals())

         if not isinstance(args, (list, tuple)):
-            raise TypeError("The type of input args in meshgrid should be list.")
+            raise TypeError(
+                "The type of input args in meshgrid should be list."
+            )

         for id, input_ in enumerate(args):
             check_dtype(
@@ -1555,27 +1544,14 @@ def diagflat(x, offset=0, name=None):
           #  [0, 0, 3, 0, 0],
           #  [0, 0, 0, 4, 0]])
     """
-    padding_value = 0
     if in_dygraph_mode():
         if len(x.shape) <= 1:
-            return _C_ops.diag(x, offset, padding_value)
+            return _C_ops.diag(x, offset, 0)
         else:
             y = _C_ops.flatten(x, 0, -1)
-            return _C_ops.diag(y, offset, padding_value)
-
-    if _in_legacy_dygraph():
-        if len(x.shape) == 1:
-            return _legacy_C_ops.diag_v2(
-                x, "offset", offset, "padding_value", padding_value
-            )
-        else:
-            y, _ = _legacy_C_ops.flatten_contiguous_range(
-                x, "start_axis", 0, "stop_axis", -1
-            )
-            return _legacy_C_ops.diag_v2(
-                y, "offset", offset, "padding_value", padding_value
-            )
+            return _C_ops.diag(y, offset, 0)
+    else:
+        padding_value = 0
         check_type(x, 'x', (Variable), 'diagflat')
         check_dtype(
             x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat'
@@ -1690,11 +1666,6 @@ def diag(x, offset=0, padding_value=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.diag(x, offset, padding_value)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.diag_v2(
-                x, "offset", offset, "padding_value", padding_value
-            )
     else:
         check_type(x, 'x', (Variable), 'diag_v2')
         check_dtype(
@@ -1782,15 +1753,7 @@ def empty(shape, dtype=None, name=None):
         )
         out.stop_gradient = True
         return out
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        out = _legacy_C_ops.empty(
-            'shape', shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-        )
-        out.stop_gradient = True
-        return out
-
+    else:
         helper = LayerHelper("empty", **locals())

         inputs = {}
@@ -1863,14 +1826,7 @@ def empty_like(x, dtype=None, name=None):
         )
         out.stop_gradient = True
         return out
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.empty(
-            'shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-        )
-        out.stop_gradient = True
-        return out
-
+    else:
         helper = LayerHelper("empty_like", **locals())
         check_variable_and_dtype(
             x,
@@ -1958,10 +1914,6 @@ def assign(x, output=None):
             output = _C_ops.assign(input)
         else:
             _C_ops.assign_out_(input, output)
-    elif _in_legacy_dygraph():
-        if output is None:
-            output = core.VarBase()
-        _legacy_C_ops.assign(input, output)
     else:
         check_dtype(
             input.dtype,
@@ -2060,18 +2012,6 @@ def assign(x, output=None):
                 values,
                 _current_expected_place(),
             )
-        elif _in_legacy_dygraph():
-            if output is None:
-                output = core.VarBase()
-            _legacy_C_ops.assign_value(
-                output,
-                'shape',
-                list(input.shape),
-                'dtype',
-                dtype,
-                value_name,
-                values,
-            )
         else:
             if output is None:
                 output = helper.create_variable_for_type_inference(
@@ -2087,9 +2027,6 @@ def assign(x, output=None):
             },
         )

-    if is_inplace and _in_legacy_dygraph():
-        output._bump_inplace_version()
-
     return output
@@ -2227,12 +2164,13 @@ def complex(real, imag, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.complex(real, imag)
-    if paddle.in_dynamic_mode():
-        return paddle._legacy_C_ops.complex(real, imag)
-
-    check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
-    check_variable_and_dtype(imag, 'imag', ['float32', 'float64'], 'complex')
+    else:
+        check_variable_and_dtype(
+            real, 'real', ['float32', 'float64'], 'complex'
+        )
+        check_variable_and_dtype(
+            imag, 'imag', ['float32', 'float64'], 'complex'
+        )

         op_type = "complex"
         helper = LayerHelper(op_type, **locals())
@@ -2242,7 +2180,9 @@ def complex(real, imag, name=None):
         )
         outputs = {"Out": out}
         attrs = {}
-        helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
+        helper.append_op(
+            type=op_type, inputs=inputs, attrs=attrs, outputs=outputs
+        )
         return out
@@ -2291,6 +2231,17 @@ def tril_indices(row, col, offset=0, dtype='int64'):
             # [[ 1, 2, 2, 3, 3, 3],
             #  [ 0, 0, 1, 0, 1, 2]]
     """
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+
+    if in_dygraph_mode():
+        if col is None:
+            col = row
+        out = _C_ops.tril_indices(
+            row, col, offset, dtype, _current_expected_place()
+        )
+        return out
+    else:
         if not isinstance(row, int) or row < 0:
             raise TypeError("row should be a non-negative int")
@@ -2303,22 +2254,6 @@ def tril_indices(row, col, offset=0, dtype='int64'):
         if not isinstance(offset, int):
             raise TypeError("offset should be a int")

-    if not isinstance(dtype, core.VarDesc.VarType):
-        dtype = convert_np_dtype_to_dtype_(dtype)
-
-    if in_dygraph_mode():
-        out = _C_ops.tril_indices(
-            row, col, offset, dtype, _current_expected_place()
-        )
-        return out
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.tril_indices(
-            'rows', row, 'cols', col, 'offset', offset, "dtype", dtype
-        )
-        return out
-    else:
         helper = LayerHelper("tril_indices", **locals())
         out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -2375,6 +2310,17 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
             # [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3],
             #  [0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 2, 3]]
     """
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+
+    if in_dygraph_mode():
+        if col is None:
+            col = row
+        out = _C_ops.triu_indices(
+            row, col, offset, dtype, _current_expected_place()
+        )
+        return out
+    else:
         if not isinstance(row, int) or row < 0:
             raise TypeError("row should be a non-negative int")
@@ -2387,22 +2333,6 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
         if not isinstance(offset, int):
             raise TypeError("offset should be a int")

-    if not isinstance(dtype, core.VarDesc.VarType):
-        dtype = convert_np_dtype_to_dtype_(dtype)
-
-    if in_dygraph_mode():
-        out = _C_ops.triu_indices(
-            row, col, offset, dtype, _current_expected_place()
-        )
-        return out
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.triu_indices(
-            'row', row, 'col', col, 'offset', offset, "dtype", dtype
-        )
-        return out
-    else:
         helper = LayerHelper("triu_indices", **locals())
         out = helper.create_variable_for_type_inference(dtype=dtype)
......
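Review note on tril_indices/triu_indices: besides deleting the legacy branch, the dtype normalization and the col-is-None default now run before the eager dispatch instead of after the argument checks, so the _C_ops call sees concrete values. Behavior at the public API looks unchanged (assumes paddle is installed):

```python
import paddle

# col=None defaults to row inside the op after this change
idx = paddle.tril_indices(4, None, 0)
print(idx)
# [[0, 1, 1, 2, 2, 2, 3, 3, 3, 3],
#  [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]]
```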
@@ -20,10 +20,10 @@ import string
 import numpy as np
 import opt_einsum

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from .linalg import matmul, transpose
 from .manipulation import reshape, squeeze, unsqueeze
@@ -829,18 +829,15 @@ def gen_einsum_op(equation, *operands):
     """
     EinsumOp Python Interface:
     """
-    assert len(operands) <= 2, "Only support two operands in EinsumOp."
     if in_dygraph_mode():
         return _C_ops.einsum(operands, equation)[0]
-
-    if _in_legacy_dygraph():
-        # dygraph
-        return _legacy_C_ops.einsum(
-            operands, len(operands), len(operands), 'equation', equation
-        )[0]
-
+    else:
+        assert len(operands) <= 2, "Only support two operands in EinsumOp."
         for inp in operands:
-            check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
+            check_variable_and_dtype(
+                inp, 'dtype', ['float32', 'float64'], 'einsum'
+            )
         check_type(equation, 'equation', str, 'einsum')
         helper = LayerHelper('einsum', **locals())
         out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
......
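Review note: in gen_einsum_op the two-operand assert now guards only the static-graph branch; eager mode dispatches straight to _C_ops.einsum. Public-API usage is unaffected (assumes paddle is installed):

```python
import paddle

a = paddle.rand([2, 3])
b = paddle.rand([3, 4])
print(paddle.einsum('ij,jk->ik', a, b).shape)  # [2, 4]
```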
@@ -24,7 +24,6 @@ from ..fluid.proto import framework_pb2
 from ..framework import (
     LayerHelper,
     OpProtoHolder,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
@@ -274,15 +273,16 @@ def generate_activation_fn(op_type):
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, op_type):
-            op = getattr(_C_ops, op_type)
-            return op(x)
-        # TODO(dev): Because some ops' yaml has not been migrated.
-        # Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, op_type)
-            return op(x)
-
+        if in_dygraph_mode():
+            if hasattr(_C_ops, op_type):
+                op = getattr(_C_ops, op_type)
+                return op(x)
+            else:
+                # TODO(dev): Because some ops' yaml has not been migrated.
+                # Replace it with _C_ops while all yaml work is done.
+                op = getattr(_legacy_C_ops, op_type)
+                return op(x)
+        else:
             if op_type not in ["abs", "exp", "square"]:
                 check_variable_and_dtype(
                     x, 'x', ['float16', 'float32', 'float64'], op_type
@@ -307,7 +307,9 @@ def generate_activation_fn(op_type):
             helper = LayerHelper(op_type, **locals())
             output = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
+            helper.append_op(
+                type=op_type, inputs={"X": x}, outputs={"Out": output}
+            )
             return output

     func.__name__ = op_type
@@ -332,12 +334,14 @@ def generate_inplace_fn(inplace_op_type):
     origin_op_type = inplace_op_type[:-1]

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, inplace_op_type):
-            op = getattr(_C_ops, inplace_op_type)
-            return op(x)
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, inplace_op_type)
-            return op(x)
-
+        if in_dygraph_mode():
+            if hasattr(_C_ops, inplace_op_type):
+                op = getattr(_C_ops, inplace_op_type)
+                return op(x)
+            else:
+                op = getattr(_legacy_C_ops, inplace_op_type)
+                return op(x)
+        else:
             warnings.warn(
                 "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
                     inplace_op_type, origin_op_type
......
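Review note: generate_activation_fn and generate_inplace_fn now nest the not-yet-migrated fallback inside the dygraph branch: prefer _C_ops when the kernel exists, otherwise fall back to _legacy_C_ops. A self-contained sketch of that hasattr/getattr pattern (stubs, not Paddle source; 'softsign' is just an example of a hypothetically unmigrated op):

```python
import math

class _C_ops:                      # stub: holds only migrated ops
    @staticmethod
    def exp(x):
        return math.exp(x)

class _legacy_C_ops:               # stub: assumed to carry every op
    @staticmethod
    def exp(x):
        return math.exp(x)

    @staticmethod
    def softsign(x):
        return x / (1.0 + abs(x))

def make_activation(op_type):
    def func(x):
        if hasattr(_C_ops, op_type):               # yaml migrated -> new kernel
            return getattr(_C_ops, op_type)(x)
        return getattr(_legacy_C_ops, op_type)(x)  # not yet migrated -> fallback
    func.__name__ = op_type
    return func

print(make_activation('exp')(1.0))        # served by _C_ops
print(make_activation('softsign')(-3.0))  # falls back to _legacy_C_ops
```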
This diff is collapsed.
@@ -26,10 +26,9 @@ if _in_eager_mode_:
 else:
     from ..framework import VarBase as Tensor

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.tensor.creation import full

-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import LayerHelper, in_dygraph_mode

 __all__ = []
@@ -42,12 +41,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
+    else:
         check_variable_and_dtype(
             x,
             "x",
@@ -58,7 +52,15 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         check_variable_and_dtype(
             y,
             "y",
-            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
+            [
+                "bool",
+                "int8",
+                "int16",
+                "int32",
+                "int64",
+                "float32",
+                "float64",
+            ],
             op_name,
         )
         if out is not None:
@@ -80,7 +82,9 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
-            helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
+            )
         return out
@@ -288,9 +292,7 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.is_empty(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.is_empty(x)
-
+    else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
         )
@@ -336,14 +338,13 @@ def equal_all(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.equal_all(x, y)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.equal_all(x, y)
-
+    else:
         helper = LayerHelper("equal_all", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         helper.append_op(
-            type='equal_all', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [out]}
+            type='equal_all',
+            inputs={'X': [x], 'Y': [y]},
+            outputs={'Out': [out]},
         )
         return out
@@ -393,10 +394,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.allclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.allclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
+    else:
         check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
         check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
         check_type(rtol, 'rtol', float, 'allclose')
@@ -456,9 +454,6 @@ def equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.equal(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.equal(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -512,9 +507,6 @@ def greater_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.greater_equal(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_equal(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -568,9 +560,6 @@ def greater_than(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.greater_than(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_than(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -625,9 +614,6 @@ def less_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.less_equal(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_equal(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -682,9 +668,6 @@ def less_than(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.less_than(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_than(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -739,9 +722,6 @@ def not_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.not_equal(x, y)
-    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.not_equal(x, y)
     else:
         check_variable_and_dtype(
             x,
@@ -802,15 +782,12 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
+    else:
         check_variable_and_dtype(
-            x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name
+            x,
+            "x",
+            ["bool", "uint8", "int8", "int16", "int32", "int64"],
+            op_name,
         )
         if y is not None:
             check_variable_and_dtype(
@@ -834,7 +811,9 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
-            helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
+            )
         return out
@@ -998,11 +977,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.isclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
+    else:
         check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
         check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
         check_type(rtol, 'rtol', float, 'isclose')
......
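Review note: the comparison ops (equal, less_than, equal_all, isclose, ...) lose their legacy branch but keep identical public behavior in dygraph. A quick check (assumes paddle is installed):

```python
import paddle

x = paddle.to_tensor([1, 2, 3])
y = paddle.to_tensor([1, 3, 3])
print(paddle.equal(x, y).numpy())      # [ True False  True]
print(paddle.less_than(x, y).numpy())  # [False  True False]
print(paddle.isclose(x.astype('float32'), y.astype('float32')).numpy())
```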
This diff is collapsed.
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .. import _C_ops, _legacy_C_ops
+from .. import _C_ops
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper
 from .layer_function_generator import (
     add_sample_code,
@@ -218,10 +218,10 @@ def acos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acos')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acos'
+        )
         helper = LayerHelper('acos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
@@ -255,10 +255,10 @@ def acosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acosh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acosh'
+        )
         helper = LayerHelper('acosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
@@ -292,10 +292,10 @@ def asin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asin')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asin'
+        )
         helper = LayerHelper('asin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
@@ -329,10 +329,10 @@ def asinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asinh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asinh'
+        )
         helper = LayerHelper('asinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
@@ -366,10 +366,10 @@ def atan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atan')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+        )
         helper = LayerHelper('atan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
@@ -403,10 +403,10 @@ def atanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atanh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atanh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+        )
         helper = LayerHelper('atanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
@@ -441,10 +441,10 @@ def ceil(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.ceil(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.ceil(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+        )
         helper = LayerHelper('ceil', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
@@ -480,10 +480,10 @@ def cos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cos')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cos'
+        )
         helper = LayerHelper('cos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
@@ -519,10 +519,10 @@ def cosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cosh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+        )
         helper = LayerHelper('cosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
@@ -557,9 +557,7 @@ def exp(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exp(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.exp(x)
-
+    else:
         check_variable_and_dtype(
             x,
             'x',
@@ -608,10 +606,10 @@ def expm1(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expm1(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expm1(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+        )
         helper = LayerHelper('expm1', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
@@ -646,10 +644,10 @@ def floor(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.floor(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.floor(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+        )
         helper = LayerHelper('floor', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
@@ -684,15 +682,15 @@ def reciprocal(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.reciprocal(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reciprocal(x)
-
+    else:
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
         )
         helper = LayerHelper('reciprocal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        helper.append_op(type='reciprocal', inputs={"X": x}, outputs={"Out": out})
+        helper.append_op(
+            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
+        )
         return out
@@ -731,10 +729,10 @@ def round(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.round(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.round(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'round'
+        )
         helper = LayerHelper('round', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
...@@ -770,10 +768,10 @@ def rsqrt(x, name=None): ...@@ -770,10 +768,10 @@ def rsqrt(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.rsqrt(x) return _C_ops.rsqrt(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.rsqrt(x) check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'rsqrt') )
helper = LayerHelper('rsqrt', **locals()) helper = LayerHelper('rsqrt', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out}) helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
...@@ -808,9 +806,7 @@ def sigmoid(x, name=None): ...@@ -808,9 +806,7 @@ def sigmoid(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.sigmoid(x) return _C_ops.sigmoid(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.sigmoid(x)
check_variable_and_dtype( check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sigmoid' x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
) )
...@@ -847,10 +843,10 @@ def sin(x, name=None): ...@@ -847,10 +843,10 @@ def sin(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.sin(x) return _C_ops.sin(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.sin(x) check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sin'
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin') )
helper = LayerHelper('sin', **locals()) helper = LayerHelper('sin', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out}) helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
...@@ -884,10 +880,10 @@ def sinh(x, name=None): ...@@ -884,10 +880,10 @@ def sinh(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.sinh(x) return _C_ops.sinh(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.sinh(x) check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sinh'
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sinh') )
helper = LayerHelper('sinh', **locals()) helper = LayerHelper('sinh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out}) helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
...@@ -920,10 +916,10 @@ def sqrt(x, name=None): ...@@ -920,10 +916,10 @@ def sqrt(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.sqrt(x) return _C_ops.sqrt(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.sqrt(x) check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sqrt') )
helper = LayerHelper('sqrt', **locals()) helper = LayerHelper('sqrt', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out}) helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
...@@ -956,9 +952,7 @@ def square(x, name=None): ...@@ -956,9 +952,7 @@ def square(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.square(x) return _C_ops.square(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.square(x)
check_variable_and_dtype( check_variable_and_dtype(
x, x,
'x', 'x',
...@@ -1008,10 +1002,10 @@ def tan(x, name=None): ...@@ -1008,10 +1002,10 @@ def tan(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.tan(x) return _C_ops.tan(x)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.tan(x) check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'tan'
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tan') )
helper = LayerHelper('tan', **locals()) helper = LayerHelper('tan', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out}) helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
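Every hunk in this file collapses the old three-way dispatch (new eager, legacy dygraph, static graph) into a single if/else on in_dygraph_mode(). A minimal sketch of the resulting shape, modeled on the sin hunk above (illustrative only, not the exact module code):

import paddle
from paddle import _C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper

def sin_like(x, name=None):
    if in_dygraph_mode():
        # Eager mode: dispatch straight to the C++ kernel.
        return _C_ops.sin(x)
    else:
        # Static graph: validate the dtype, then append an op to the program.
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin')
        helper = LayerHelper('sin', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
        return out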
......
...@@ -16,11 +16,7 @@ ...@@ -16,11 +16,7 @@
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import ( from paddle.fluid.framework import _current_expected_place, in_dygraph_mode
_current_expected_place,
_in_legacy_dygraph,
in_dygraph_mode,
)
from paddle.static import Variable from paddle.static import Variable
from ..fluid.data_feeder import ( from ..fluid.data_feeder import (
...@@ -80,10 +76,7 @@ def bernoulli(x, name=None): ...@@ -80,10 +76,7 @@ def bernoulli(x, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.bernoulli(x) return _C_ops.bernoulli(x)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.bernoulli(x)
check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli") check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
helper = LayerHelper("randint", **locals()) helper = LayerHelper("randint", **locals())
...@@ -129,10 +122,7 @@ def poisson(x, name=None): ...@@ -129,10 +122,7 @@ def poisson(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.poisson(x) return _C_ops.poisson(x)
else:
if paddle.in_dynamic_mode():
return _legacy_C_ops.poisson(x)
check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson") check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")
helper = LayerHelper("poisson", **locals()) helper = LayerHelper("poisson", **locals())
...@@ -197,12 +187,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None): ...@@ -197,12 +187,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.multinomial(x, num_samples, replacement) return _C_ops.multinomial(x, num_samples, replacement)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.multinomial(
x, 'num_samples', num_samples, 'replacement', replacement
)
check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial") check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
helper = LayerHelper("multinomial", **locals()) helper = LayerHelper("multinomial", **locals())
...@@ -356,22 +341,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -356,22 +341,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
return _C_ops.gaussian( return _C_ops.gaussian(
shape, float(mean), float(std), seed, dtype, place shape, float(mean), float(std), seed, dtype, place
) )
else:
if _in_legacy_dygraph():
shape = utils.convert_shape_to_list(shape)
return _legacy_C_ops.gaussian_random(
'shape',
shape,
'mean',
float(mean),
'std',
float(std),
'seed',
seed,
'dtype',
dtype,
)
check_shape(shape, op_type_for_check) check_shape(shape, op_type_for_check)
check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check) check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
...@@ -390,7 +360,10 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): ...@@ -390,7 +360,10 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
helper = LayerHelper('gaussian', **locals()) helper = LayerHelper('gaussian', **locals())
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs type='gaussian_random',
inputs=inputs,
outputs={'Out': out},
attrs=attrs,
) )
out.stop_gradient = True out.stop_gradient = True
return out return out
...@@ -550,7 +523,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): ...@@ -550,7 +523,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
# [1.00780561 3.78457445 5.81058198] # random # [1.00780561 3.78457445 5.81058198] # random
""" """
if not paddle.in_dynamic_mode(): if not in_dygraph_mode():
check_type(mean, 'mean', (int, float, Variable), 'normal') check_type(mean, 'mean', (int, float, Variable), 'normal')
check_type(std, 'std', (int, float, Variable), 'normal') check_type(std, 'std', (int, float, Variable), 'normal')
if isinstance(mean, Variable): if isinstance(mean, Variable):
...@@ -588,7 +561,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): ...@@ -588,7 +561,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
return gaussian(shape=shape, mean=mean, std=std, name=name) return gaussian(shape=shape, mean=mean, std=std, name=name)
out = out * std + mean out = out * std + mean
if not paddle.in_dynamic_mode(): if not in_dygraph_mode():
out.stop_gradient = True out.stop_gradient = True
return out return out
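For reference, typical eager-mode usage of the public sampling APIs whose internals change above (a hedged sketch; outputs are random):

import paddle
paddle.seed(2023)                                  # illustrative seed
x = paddle.normal(mean=0.0, std=2.0, shape=[3])    # three N(0, 2^2) samples
y = paddle.standard_normal([2, 2])                 # gaussian with mean=0, std=1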
...@@ -680,22 +653,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -680,22 +653,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
seed, seed,
_current_expected_place(), _current_expected_place(),
) )
else:
if _in_legacy_dygraph():
shape = utils.convert_shape_to_list(shape)
return _legacy_C_ops.uniform_random(
'shape',
shape,
'min',
float(min),
'max',
float(max),
'seed',
seed,
'dtype',
dtype,
)
check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand') check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand') check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
check_type(min, 'min', (float, int, Variable), 'uniform/rand') check_type(min, 'min', (float, int, Variable), 'uniform/rand')
...@@ -710,7 +668,10 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): ...@@ -710,7 +668,10 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
helper = LayerHelper("uniform", **locals()) helper = LayerHelper("uniform", **locals())
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out} type="uniform_random",
inputs=inputs,
attrs=attrs,
outputs={"Out": out},
) )
out.stop_gradient = True out.stop_gradient = True
return out return out
...@@ -751,12 +712,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None): ...@@ -751,12 +712,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
# [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random
# [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
""" """
if in_dygraph_mode():
return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0) return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
else:
return _legacy_C_ops.uniform_random_inplace_(
x, 'min', min, 'max', max, 'seed', seed
)
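uniform_ is the in-place variant; after this change it always routes to _C_ops.uniform_inplace_ in dynamic mode. A small usage sketch (values are random):

import paddle
x = paddle.ones([2, 4])
x.uniform_(min=-1.0, max=1.0)   # overwrites x with U(-1, 1) samples in place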
def randint(low=0, high=None, shape=[1], dtype=None, name=None): def randint(low=0, high=None, shape=[1], dtype=None, name=None):
...@@ -841,12 +797,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): ...@@ -841,12 +797,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
shape = utils.convert_shape_to_list(shape) shape = utils.convert_shape_to_list(shape)
place = _current_expected_place() place = _current_expected_place()
return _C_ops.randint(low, high, shape, dtype, place) return _C_ops.randint(low, high, shape, dtype, place)
if _in_legacy_dygraph(): else:
shape = utils.convert_shape_to_list(shape)
return _legacy_C_ops.randint(
'shape', shape, 'low', low, 'high', high, 'seed', 0, 'dtype', dtype
)
check_shape(shape, 'randint') check_shape(shape, 'randint')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint') check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
if low >= high: if low >= high:
...@@ -1015,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): ...@@ -1015,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
"high = {1}".format(low, high) "high = {1}".format(low, high)
) )
if paddle.in_dynamic_mode(): if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape) shape = utils.convert_shape_to_list(shape)
out = _legacy_C_ops.randint( out = _legacy_C_ops.randint(
'shape', 'shape',
...@@ -1031,7 +982,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): ...@@ -1031,7 +982,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
) )
out = paddle.cast(out, dtype) out = paddle.cast(out, dtype)
return out return out
else:
check_shape(shape, 'randint_like') check_shape(shape, 'randint_like')
check_dtype( check_dtype(
dtype, dtype,
...@@ -1095,11 +1046,11 @@ def randperm(n, dtype="int64", name=None): ...@@ -1095,11 +1046,11 @@ def randperm(n, dtype="int64", name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.randperm(n, dtype, _current_expected_place()) return _C_ops.randperm(n, dtype, _current_expected_place())
if _in_legacy_dygraph(): else:
return _legacy_C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
if n < 1: if n < 1:
raise ValueError("The input n should be greater than 0 in randperm op.") raise ValueError(
"The input n should be greater than 0 in randperm op."
)
check_dtype( check_dtype(
dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm' dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
) )
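For reference, eager-mode usage of the two integer samplers above (a sketch; outputs are random):

import paddle
perm = paddle.randperm(5)                            # random permutation of 0..4
ints = paddle.randint(low=0, high=10, shape=[2, 3])  # uniform integers in [0, 10)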
...@@ -1199,9 +1150,7 @@ def exponential_(x, lam=1.0, name=None): ...@@ -1199,9 +1150,7 @@ def exponential_(x, lam=1.0, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.exponential_(x, lam) return _C_ops.exponential_(x, lam)
elif paddle.in_dynamic_mode(): else:
return _legacy_C_ops.exponential_(x, "lambda", lam)
check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential") check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
helper = LayerHelper("exponential", **locals()) helper = LayerHelper("exponential", **locals())
......
...@@ -17,14 +17,12 @@ ...@@ -17,14 +17,12 @@
import numpy as np import numpy as np
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops
from paddle.common_ops_import import VarDesc, Variable from paddle.common_ops_import import VarDesc, Variable
from ..fluid.data_feeder import check_dtype, check_variable_and_dtype from ..fluid.data_feeder import check_dtype, check_variable_and_dtype
from ..fluid.framework import _in_legacy_dygraph
from ..framework import ( from ..framework import (
LayerHelper, LayerHelper,
_non_static_mode,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
core, core,
in_dygraph_mode, in_dygraph_mode,
...@@ -99,12 +97,7 @@ def argsort(x, axis=-1, descending=False, name=None): ...@@ -99,12 +97,7 @@ def argsort(x, axis=-1, descending=False, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
_, ids = _C_ops.argsort(x, axis, descending) _, ids = _C_ops.argsort(x, axis, descending)
return ids return ids
else:
if _in_legacy_dygraph():
_, ids = _legacy_C_ops.argsort(
x, 'axis', axis, 'descending', descending
)
return ids
check_variable_and_dtype( check_variable_and_dtype(
x, x,
'x', 'x',
...@@ -187,20 +180,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): ...@@ -187,20 +180,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype) return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype)
if _in_legacy_dygraph(): else:
out = _legacy_C_ops.arg_max(
x,
'axis',
axis,
'dtype',
var_dtype,
'keepdims',
keepdim,
'flatten',
flatten,
)
return out
helper = LayerHelper("argmax", **locals()) helper = LayerHelper("argmax", **locals())
check_variable_and_dtype( check_variable_and_dtype(
x, x,
...@@ -281,20 +261,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): ...@@ -281,20 +261,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype) return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype)
if _in_legacy_dygraph(): else:
out = _legacy_C_ops.arg_min(
x,
'axis',
axis,
'dtype',
var_dtype,
'keepdims',
keepdim,
'flatten',
flatten,
)
return out
helper = LayerHelper("argmin", **locals()) helper = LayerHelper("argmin", **locals())
check_variable_and_dtype( check_variable_and_dtype(
x, x,
...@@ -354,10 +321,7 @@ def index_select(x, index, axis=0, name=None): ...@@ -354,10 +321,7 @@ def index_select(x, index, axis=0, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.index_select(x, index, axis) return _C_ops.index_select(x, index, axis)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.index_select(x, index, 'dim', axis)
helper = LayerHelper("index_select", **locals()) helper = LayerHelper("index_select", **locals())
check_variable_and_dtype( check_variable_and_dtype(
x, x,
...@@ -366,7 +330,10 @@ def index_select(x, index, axis=0, name=None): ...@@ -366,7 +330,10 @@ def index_select(x, index, axis=0, name=None):
'paddle.tensor.search.index_select', 'paddle.tensor.search.index_select',
) )
check_variable_and_dtype( check_variable_and_dtype(
index, 'index', ['int32', 'int64'], 'paddle.tensor.search.index_select' index,
'index',
['int32', 'int64'],
'paddle.tensor.search.index_select',
) )
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
...@@ -438,8 +405,6 @@ def nonzero(x, as_tuple=False): ...@@ -438,8 +405,6 @@ def nonzero(x, as_tuple=False):
if in_dygraph_mode(): if in_dygraph_mode():
outs = _C_ops.nonzero(x) outs = _C_ops.nonzero(x)
elif paddle.in_dynamic_mode():
outs = _legacy_C_ops.where_index(x)
else: else:
helper = LayerHelper("where_index", **locals()) helper = LayerHelper("where_index", **locals())
...@@ -522,12 +487,7 @@ def sort(x, axis=-1, descending=False, name=None): ...@@ -522,12 +487,7 @@ def sort(x, axis=-1, descending=False, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
outs, _ = _C_ops.argsort(x, axis, descending) outs, _ = _C_ops.argsort(x, axis, descending)
return outs return outs
else:
if _in_legacy_dygraph():
outs, _ = _legacy_C_ops.argsort(
x, 'axis', axis, 'descending', descending
)
return outs
helper = LayerHelper("sort", **locals()) helper = LayerHelper("sort", **locals())
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=False dtype=x.dtype, stop_gradient=False
...@@ -577,9 +537,7 @@ def mode(x, axis=-1, keepdim=False, name=None): ...@@ -577,9 +537,7 @@ def mode(x, axis=-1, keepdim=False, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.mode(x, axis, keepdim) return _C_ops.mode(x, axis, keepdim)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.mode(x, "axis", axis, "keepdim", keepdim)
helper = LayerHelper("mode", **locals()) helper = LayerHelper("mode", **locals())
inputs = {"X": [x]} inputs = {"X": [x]}
attrs = {} attrs = {}
...@@ -687,11 +645,6 @@ def where(condition, x=None, y=None, name=None): ...@@ -687,11 +645,6 @@ def where(condition, x=None, y=None, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y) return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.where(
broadcast_condition, broadcast_x, broadcast_y
)
else: else:
helper = LayerHelper("where", **locals()) helper = LayerHelper("where", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
...@@ -784,9 +737,6 @@ def index_sample(x, index): ...@@ -784,9 +737,6 @@ def index_sample(x, index):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.index_sample(x, index) return _C_ops.index_sample(x, index)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.index_sample(x, index)
else: else:
helper = LayerHelper("index_sample", **locals()) helper = LayerHelper("index_sample", **locals())
check_variable_and_dtype( check_variable_and_dtype(
...@@ -843,9 +793,7 @@ def masked_select(x, mask, name=None): ...@@ -843,9 +793,7 @@ def masked_select(x, mask, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.masked_select(x, mask) return _C_ops.masked_select(x, mask)
if _in_legacy_dygraph(): else:
return _legacy_C_ops.masked_select(x, mask)
helper = LayerHelper("masked_select", **locals()) helper = LayerHelper("masked_select", **locals())
check_variable_and_dtype( check_variable_and_dtype(
x, x,
...@@ -858,7 +806,9 @@ def masked_select(x, mask, name=None): ...@@ -858,7 +806,9 @@ def masked_select(x, mask, name=None):
) )
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='masked_select', inputs={'X': x, 'Mask': mask}, outputs={'Y': out} type='masked_select',
inputs={'X': x, 'Mask': mask},
outputs={'Y': out},
) )
return out return out
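masked_select usage, for reference (the result follows the mask, so it is always 1-D):

import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
mask = x > 2.5
out = paddle.masked_select(x, mask)   # 1-D Tensor: [3.0, 4.0]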
...@@ -916,26 +866,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): ...@@ -916,26 +866,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
axis = -1 axis = -1
out, indices = _C_ops.topk(x, k, axis, largest, sorted) out, indices = _C_ops.topk(x, k, axis, largest, sorted)
return out, indices return out, indices
if _non_static_mode():
if axis is None:
out, indices = _legacy_C_ops.top_k_v2(
x, 'k', int(k), 'largest', largest, 'sorted', sorted
)
else: else:
out, indices = _legacy_C_ops.top_k_v2(
x,
'k',
int(k),
'axis',
axis,
'largest',
largest,
'sorted',
sorted,
)
return out, indices
helper = LayerHelper("top_k_v2", **locals()) helper = LayerHelper("top_k_v2", **locals())
inputs = {"X": [x]} inputs = {"X": [x]}
attrs = {} attrs = {}
...@@ -1065,12 +996,7 @@ def searchsorted( ...@@ -1065,12 +996,7 @@ def searchsorted(
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.searchsorted(sorted_sequence, values, out_int32, right) return _C_ops.searchsorted(sorted_sequence, values, out_int32, right)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.searchsorted(
sorted_sequence, values, "out_int32", out_int32, "right", right
)
check_variable_and_dtype( check_variable_and_dtype(
sorted_sequence, sorted_sequence,
'SortedSequence', 'SortedSequence',
...@@ -1135,16 +1061,10 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None): ...@@ -1135,16 +1061,10 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
# [[0, 2], # [[0, 2],
# [1, 2]])) # [1, 2]]))
""" """
if _non_static_mode(): if in_dygraph_mode():
if axis is not None: if axis is not None:
if _in_legacy_dygraph():
return _legacy_C_ops.kthvalue(
x, 'k', k, "axis", axis, "keepdim", keepdim
)
return _C_ops.kthvalue(x, k, axis, keepdim) return _C_ops.kthvalue(x, k, axis, keepdim)
else: else:
if _in_legacy_dygraph():
return _legacy_C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
return _C_ops.kthvalue(x, k, -1, keepdim) return _C_ops.kthvalue(x, k, -1, keepdim)
helper = LayerHelper("kthvalue", **locals()) helper = LayerHelper("kthvalue", **locals())
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
import paddle import paddle
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode from paddle.fluid.framework import in_dygraph_mode
from ..fluid.data_feeder import check_type, check_variable_and_dtype from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..framework import LayerHelper, core from ..framework import LayerHelper, core
...@@ -81,13 +81,8 @@ def mean(x, axis=None, keepdim=False, name=None): ...@@ -81,13 +81,8 @@ def mean(x, axis=None, keepdim=False, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.mean(x, axis, keepdim) return _C_ops.mean(x, axis, keepdim)
else:
reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
if _in_legacy_dygraph():
return _legacy_C_ops.reduce_mean(
x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
)
check_variable_and_dtype( check_variable_and_dtype(
x, x,
'x/input', 'x/input',
...@@ -111,7 +106,10 @@ def mean(x, axis=None, keepdim=False, name=None): ...@@ -111,7 +106,10 @@ def mean(x, axis=None, keepdim=False, name=None):
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all} attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op( helper.append_op(
type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs type='reduce_mean',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs,
) )
return out return out
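The eager branch of mean now passes axis and keepdim straight to _C_ops.mean; for example:

import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
paddle.mean(x)                         # 2.5
paddle.mean(x, axis=0)                 # [2., 3.]
paddle.mean(x, axis=1, keepdim=True)   # [[1.5], [3.5]]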
...@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None): ...@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
out2 = paddle.var(x, axis=1) out2 = paddle.var(x, axis=1)
# [1. 4.33333333] # [1. 4.33333333]
""" """
if not paddle.in_dynamic_mode(): if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var') check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')
u = mean(x, axis, True, name) u = mean(x, axis, True, name)
...@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None): ...@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
# [1. 2.081666] # [1. 2.081666]
""" """
if not paddle.in_dynamic_mode(): if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std') check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')
out = var(**locals()) out = var(**locals())
...@@ -243,9 +241,7 @@ def numel(x, name=None): ...@@ -243,9 +241,7 @@ def numel(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.numel(x) return _C_ops.numel(x)
elif _in_legacy_dygraph(): else:
return _legacy_C_ops.size(x)
if not isinstance(x, Variable): if not isinstance(x, Variable):
raise TypeError("x must be a Tensor in numel") raise TypeError("x must be a Tensor in numel")
helper = LayerHelper('numel', **locals()) helper = LayerHelper('numel', **locals())
...@@ -331,14 +327,17 @@ def nanmedian(x, axis=None, keepdim=True, name=None): ...@@ -331,14 +327,17 @@ def nanmedian(x, axis=None, keepdim=True, name=None):
if len(axis) != len(set(axis)): if len(axis) != len(set(axis)):
raise ValueError("Axis has duplicated elements.") raise ValueError("Axis has duplicated elements.")
if _in_legacy_dygraph(): if in_dygraph_mode():
median_index, out = _legacy_C_ops.nanmedian( median_index, out = _legacy_C_ops.nanmedian(
x, 'axis', axis, 'keepdim', keepdim x, 'axis', axis, 'keepdim', keepdim
) )
return out return out
else:
check_variable_and_dtype( check_variable_and_dtype(
x, 'X', ['int32', 'int64', 'float16', 'float32', 'float64'], 'nanmedian' x,
'X',
['int32', 'int64', 'float16', 'float32', 'float64'],
'nanmedian',
) )
helper = LayerHelper('nanmedian', **locals()) helper = LayerHelper('nanmedian', **locals())
...@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False): ...@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False):
for q_num in q: for q_num in q:
if q_num < 0 or q_num > 1: if q_num < 0 or q_num > 1:
raise ValueError("q should be in range [0, 1]") raise ValueError("q should be in range [0, 1]")
if paddle.in_dynamic_mode(): if in_dygraph_mode():
q_num = paddle.to_tensor(q_num, dtype='float64') q_num = paddle.to_tensor(q_num, dtype='float64')
if ignore_nan: if ignore_nan:
indices.append(q_num * (valid_counts - 1)) indices.append(q_num * (valid_counts - 1))
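The index computed above follows the linear-interpolation convention: for quantile q over n valid elements, the fractional position is q * (n - 1). A worked check:

import paddle
x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0])
paddle.quantile(x, q=0.5)   # position 0.5 * (4 - 1) = 1.5 -> (1.0 + 2.0) / 2 = 1.5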
......