diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
index 8c81958677c81009f3bfc8714d0b2ac30679849e..eaf5b9c84f79b19cf083418a1903f439f5921436 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -727,19 +727,26 @@ def GenerateNodeCreationCodes(
     # SetTensorWrappers
     set_tensor_wrappers_list = []
+    fwd_api_input_num = 0
     for name, (atype, is_fwd_input, pos) in backward_fwd_input_map.items():
         is_optional = (name in optional_inputs)
         if is_fwd_input:
+            fwd_api_input_num += 1
             if is_optional:
                 set_tensor_wrappers = f" if({name}.is_initialized()) grad_node->SetTensorWrapper{name}({name}, true);"
             else:
                 set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, true);"
         else:
-            if IsVectorTensorType(atype):
-                tw_name = f"api_result[{pos}]"
+            if num_fwd_outputs == 1:
+                if IsVectorTensorType(atype):
+                    tw_name = f"std::get<{pos}>(api_result)"
+                else:
+                    tw_name = f"api_result"
             else:
-                tw_name = f"api_result"
+                assert IsPlainTensorType(atype), atype
+                out_pos = pos - fwd_api_input_num
+                tw_name = f"std::get<{out_pos}>(api_result)"
 
             if is_optional:
                 set_tensor_wrappers = f" if({tw_name}.is_initialized()) grad_node->SetTensorWrapper{name}({tw_name}, false);"
@@ -779,7 +786,7 @@
         if num_outputs == 1:
             set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(api_result);"
         else:
-            set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(api_result[{pos}]);"
+            set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(std::get<{pos}>(api_result));"
         set_retain_grad_list.append(set_retain_grad)
     set_out_rank_str = "\n".join(set_out_rank_list)
     set_history_str = "\n".join(set_history_list)
@@ -902,7 +909,8 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
             returns_list[0] = f"api_result"
         else:
            # Tuple api_result
-            returns_list[pos] = f"api_result[{pos}]"
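+            # The generated C++ api_result is a std::tuple here, so it is
+            # indexed with std::get rather than operator[].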
+            returns_list[pos] = f"std::get<{pos}>(api_result)"
 
         if IsPlainTensorType(rtype):
             returns_type_list[pos] = "paddle::experimental::Tensor"
diff --git a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
index 0b8bbfcb9eaceef5533a7725fcc5ada17d8e39a7..b4b19c52a348be960758a50926803c9ed669eef6 100644
--- a/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -22,7 +22,7 @@ atype_to_parsing_function = {
     "bool": "CastPyArg2Boolean",
     "int": "CastPyArg2Int",
     "long": "CastPyArg2Long",
-    "std::string": "CastPyArgs2String",
+    "std::string": "CastPyArg2String",
     "int64_t": "CastPyArg2Long",
     "float": "CastPyArg2Float",
     "string": "CastPyArg2String",
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 217edad0c0a105cc649c6c8c4433b0c8eab0119b..795f75ce7fd6d9072d4d7a936b027a42321271b8 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -825,7 +825,7 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  if (type_name == "list") {
+  if (type_name == "list" || type_name == "tuple") {
     std::vector<int> value = CastPyArg2Ints(obj, op_type, arg_pos);
     return paddle::experimental::ScalarArray(value);
diff --git a/paddle/phi/kernels/cpu/accuracy_kernel.cc b/paddle/phi/kernels/cpu/accuracy_kernel.cc
index c57ec69b73a230df48411f4074935e2bb4bce461..6ff8a1f7558973965f51f42bdd0984757f285b47 100644
--- a/paddle/phi/kernels/cpu/accuracy_kernel.cc
+++ b/paddle/phi/kernels/cpu/accuracy_kernel.cc
@@ -69,4 +69,7 @@ void AccuracyRawKernel(const Context& dev_ctx,
 
 // TODO(add supported dtype.)
 PD_REGISTER_KERNEL(
-    accuracy, CPU, ALL_LAYOUT, phi::AccuracyRawKernel, float, double) {}
+    accuracy, CPU, ALL_LAYOUT, phi::AccuracyRawKernel, float, double) {
+  kernel->InputAt(1).SetDataType(phi::DataType::INT64);
+  kernel->InputAt(2).SetDataType(phi::DataType::INT64);
+}
diff --git a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
index 83c9a915ee6357c4462f64b1e193e546973560ce..d47c2bbbca6d9801ce2ec05daa86cf6a901e9fb6 100644
--- a/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
+++ b/paddle/phi/kernels/cpu/put_along_axis_kernel.cc
@@ -84,4 +84,6 @@ PD_REGISTER_KERNEL(put_along_axis,
                    double,
                    int,
                    uint8_t,
-                   int64_t) {}
+                   int64_t) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
diff --git a/paddle/phi/kernels/cpu/take_along_axis_kernel.cc b/paddle/phi/kernels/cpu/take_along_axis_kernel.cc
index 502db8a22da0bc9ab475243b4d8f646be51ed9d5..d5bf5c5cab6974b101a42128a1c69b1fd8dfe741 100644
--- a/paddle/phi/kernels/cpu/take_along_axis_kernel.cc
+++ b/paddle/phi/kernels/cpu/take_along_axis_kernel.cc
@@ -57,4 +57,6 @@ PD_REGISTER_KERNEL(take_along_axis,
                    double,
                    int,
                    uint8_t,
-                   int64_t) {}
+                   int64_t) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
diff --git a/paddle/phi/kernels/gpu/accuracy_kernel.cu b/paddle/phi/kernels/gpu/accuracy_kernel.cu
index f08fb74e54d8c86f7b54d21c762e30cebedfe967..5eecfce09324857b53cfa462d8a65b60c27efb7d 100644
--- a/paddle/phi/kernels/gpu/accuracy_kernel.cu
+++ b/paddle/phi/kernels/gpu/accuracy_kernel.cu
@@ -114,4 +114,7 @@ PD_REGISTER_KERNEL(accuracy,
                    phi::AccuracyRawKernel,
                    phi::dtype::float16,
                    float,
-                   double) {}
+                   double) {
+  kernel->InputAt(1).SetDataType(phi::DataType::INT64);
+  kernel->InputAt(2).SetDataType(phi::DataType::INT64);
+}
diff --git a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
index d363c0c28364c065117fe53967234484871979af..c46eb73b2f9c878cf591e38c2b10279ccc581cfc 100644
--- a/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
+++ b/paddle/phi/kernels/gpu/put_along_axis_kernel.cu
@@ -83,4 +83,6 @@ PD_REGISTER_KERNEL(put_along_axis,
                    double,
                    int64_t,
                    int,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16) {
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
diff --git a/paddle/phi/kernels/gpu/take_along_axis_kernel.cu b/paddle/phi/kernels/gpu/take_along_axis_kernel.cu
index 9665a917d9dc4a5fa0d350f8fb635ecec79b7832..b1cf98102253ac8129e052effe1e3d71c70503ba 100644
--- a/paddle/phi/kernels/gpu/take_along_axis_kernel.cu
+++ b/paddle/phi/kernels/gpu/take_along_axis_kernel.cu
@@ -57,4 +57,7 @@ PD_REGISTER_KERNEL(take_along_axis,
                    double,
                    int64_t,
                    int,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16) {
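+  // The index input is not constrained to the kernel's value dtype.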
+  kernel->InputAt(1).SetDataType(phi::DataType::ALL_DTYPE);
+}
diff --git a/paddle/phi/kernels/norm_grad_kernel.h b/paddle/phi/kernels/norm_grad_kernel.h
index 55714b8a4a091f6d64cbb9a03eb9043d4c2dbf22..a67e757ba510f03f211cf383cc68b38e3099ae3c 100644
--- a/paddle/phi/kernels/norm_grad_kernel.h
+++ b/paddle/phi/kernels/norm_grad_kernel.h
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void NormGradKernel(const Context& ctx,
                     const DenseTensor& x,
-                    const DenseTensor& out,
+                    const DenseTensor& norm,
                     const DenseTensor& out_grad,
                     int axis,
                     float epsilon,
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 668cb01549f6c5665783f7b3219f34f537ce1a15..51d5b4ddb91025eb15037fbe23dae3849aa1d78b 100755
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -18,7 +18,7 @@ from ..wrapped_decorator import signature_safe_contextmanager
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import assign, cast, fill_constant
 from .. import core
-from ..framework import Program, Variable, Operator, in_dygraph_mode, static_only
+from ..framework import Program, Variable, Operator, in_dygraph_mode, static_only, _in_eager_mode
 from ..layer_helper import LayerHelper, unique_name
 from .nn import logical_and, logical_not, logical_or
 from .utils import assert_same_structure, map_structure, hold_mutable_vars, copy_mutable_vars
@@ -3852,6 +3852,8 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_is_empty(x)
         return _C_ops.is_empty(x)
 
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py
index 676ee3e3c774eab144468ce42f3c839daa948e4d..ccd184e15c0cef42880a5893efa4e60b0bf51c2a 100755
--- a/python/paddle/fluid/layers/layer_function_generator.py
+++ b/python/paddle/fluid/layers/layer_function_generator.py
@@ -258,6 +258,9 @@ def generate_activation_fn(op_type):
 
     def func(x, name=None):
         if in_dygraph_mode():
+            if _in_eager_mode():
+                op = getattr(_C_ops, "final_state_" + op_type)
+                return op(x)
             op = getattr(_C_ops, op_type)
             return op(x)
 
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index 07ed02181e8e1c84a5dbbccf0c95f057e41d58a5..1cec03a038c40566c4706d646493aa3e0738530b 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -1458,6 +1458,10 @@ def sigmoid_cross_entropy_with_logits(x,
                                               ignore_index=-1, normalize=True)
             print(loss)
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+            x, label, normalize, ignore_index)
+
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'sigmoid_cross_entropy_with_logits')
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 0736ba72f007986e94843506a5029cb4ca4516ac..c96d76fc6f98c84e6e47f3922eb03468436449fa 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11571,6 +11571,8 @@ def size(input):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_size(input)
         return _C_ops.size(input)
     check_variable_and_dtype(
         input, 'input',
@@ -12543,7 +12545,9 @@ def logical_not(x, out=None, name=None):
             res = paddle.logical_not(x)
             print(res) # [False True False True]
     """
-
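+    # In eager mode, call the auto-generated final-state op directly.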
+    if paddle.in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_logical_not(x)
     return _logical_op(
         op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)
@@ -13319,6 +13322,9 @@ def log_loss(input, label, epsilon=1e-4, name=None):
           prob = paddle.randn((10,1))
           cost = F.log_loss(input=prob, label=label)
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_log_loss(input, label, epsilon)
+
     helper = LayerHelper('log_loss', **locals())
     check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
     check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')
@@ -14335,6 +14341,8 @@ def where(condition):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_where_index(condition)
         return _C_ops.where_index(condition)
 
     helper = LayerHelper("where_index", **locals())
@@ -14829,8 +14837,8 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
 
     if in_dygraph_mode():
         if _in_eager_mode():
-            return _C_op.final_state_unfold(x, kernel_sizes, strdides, paddings,
-                                            dilations)
+            return _C_ops.final_state_unfold(x, kernel_sizes, strides,
+                                             paddings, dilations)
 
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
@@ -15059,6 +15067,10 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
             print(shard_label)
             # [[-1], [1]]
     """
+    if in_dygraph_mode() and _in_eager_mode():
+        return _C_ops.final_state_shard_index(input, index_num, nshards,
+                                              shard_id, ignore_value)
+
     check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
     op_type = 'shard_index'
     helper = LayerHelper(op_type, **locals())
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 05c0b03a025c6b6ec31966cb5c24130b9c04f33e..85f12cf1f4f95da1fa00fcafa9b6b050a926b23a 100755
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -16,9 +16,10 @@ from __future__ import print_function
 import os
 from .layer_function_generator import generate_layer_fn, generate_activation_fn, generate_inplace_fn, add_sample_code
 from .. import core
-from ..framework import convert_np_dtype_to_dtype_, Variable
+from ..framework import convert_np_dtype_to_dtype_, Variable, in_dygraph_mode, _in_eager_mode
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated
+from paddle import _C_ops
 
 __deprecated_func_name__ = {
     'tanh_shrink': 'tanhshrink',
@@ -794,6 +795,10 @@ _erf_ = generate_layer_fn('erf')
 
 
 def erf(x, name=None):
+    if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_erf(x)
+
     locals_var = locals().copy()
     kwargs = dict()
     for name, val in locals_var.items():
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 4fbc34533c668e2135ffadb184ebfd5e01776de5..673333fdff78b6dfaeb924155a5809008e075acd 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -545,7 +545,6 @@ class OpTest(unittest.TestCase):
                 v.value().get_tensor().set_recursive_sequence_lengths(lod)
             return v
         else:
-            print("init her")
             return paddle.to_tensor(value)
 
     def get_sequence_batch_size_1_input(self, lod=None, shape=None):
@@ -1502,14 +1501,14 @@ class OpTest(unittest.TestCase):
                         .recursive_sequence_lengths(), expect[1],
                         "Output (" + out_name + ") has different lod at " +
                         str(place) + " in eager dygraph mode")
-                if check_eager:
-                    with _test_eager_guard():
-                        self.assertListEqual(
-                            eager_imperative_actual.value().get_tensor()
-                            .recursive_sequence_lengths(), expect[1],
-                            "Output (" + out_name +
-                            ") has different lod at " + str(place) +
-                            " in eager dygraph mode")
+                with fluid.dygraph.base.guard():
+                    with _test_eager_guard():
+                        self.assertListEqual(
+                            eager_imperative_actual.value().get_tensor()
+                            .recursive_sequence_lengths(), expect[1],
+                            "Output (" + out_name +
+                            ") has different lod at " + str(place) +
+                            " in eager dygraph mode")
 
         # Note(zhiqiu): inplace_atol should be only set when op doesn't ensure
         # computational consistency.
diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
index eb77f84099cfb5e92c2d570e2e57390b4a1df54b..c6a7cdf1a00a0f9a9427f23ea39bbdd57736e062 100755
--- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
@@ -20,6 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestAccuracyOp(OpTest):
@@ -49,7 +50,7 @@ class TestAccuracyOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
 
 class TestAccuracyOpFp16(TestAccuracyOp):
@@ -57,7 +58,7 @@ class TestAccuracyOpFp16(TestAccuracyOp):
         self.dtype = np.float16
 
     def test_check_output(self):
-        self.check_output(atol=1e-3)
+        self.check_output(atol=1e-3, check_eager=True)
 
 
 class TestAccuracyOpError(unittest.TestCase):
@@ -127,6 +128,17 @@ class TestAccuracyAPI(unittest.TestCase):
 
             self.assertEqual((result.numpy() == expect_value).all(), True)
 
+        with _test_eager_guard():
+            predictions = paddle.to_tensor(
+                [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
+                dtype='float32')
+            label = paddle.to_tensor([[2], [0]], dtype="int64")
+            result = paddle.metric.accuracy(
+                input=predictions, label=label, k=1)
+            expect_value = np.array([0.5], dtype='float32')
+
+            self.assertEqual((result.numpy() == expect_value).all(), True)
+
 
 if __name__ == '__main__':
     paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 5c40b898d2325ba97c42807ed77be91dc76aa623..cc8fa345e7b67e1a560562e8d9a7797c15e42c6a 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -59,12 +59,17 @@ class TestActivation(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=(hasattr(self, "python_api") and
+                                       self.python_api is not None))
 
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(
+            ['X'],
+            'Out',
+            check_eager=(hasattr(self, "python_api") and
+                         self.python_api is not None))
 
     def init_dtype(self):
         self.dtype = np.float64
@@ -356,6 +361,7 @@ class TestTanh(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "tanh"
         self.init_dtype()
+        self.python_api = paddle.tanh
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
         out = np.tanh(x)
@@ -366,7 +372,7 @@ class TestTanh(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def init_dtype(self):
         #TODO If dtype is float64, the output (Out) has diff at CPUPlace
@@ -449,6 +455,7 @@ class TestAtan(TestActivation, TestParameter):
         self.op_type = "atan"
         self.init_dtype()
+        self.python_api = paddle.atan
 
         np.random.seed(1024)
         x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
         out = np.arctan(x)
@@ -459,7 +466,7 @@ class TestAtan(TestActivation, TestParameter):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def test_out_name(self):
         with fluid.program_guard(fluid.Program()):
@@ -485,6 +492,7 @@ class TestSinh(TestActivation):
     def setUp(self):
self.op_type = "sinh" self.init_dtype() + self.python_api = paddle.sinh np.random.seed(1024) x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -496,7 +504,7 @@ class TestSinh(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) def test_dygraph(self): with fluid.dygraph.guard(): @@ -557,6 +565,7 @@ class TestCosh(TestActivation): def setUp(self): self.op_type = "cosh" self.init_dtype() + self.python_api = paddle.cosh np.random.seed(1024) x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -568,7 +577,7 @@ class TestCosh(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) def test_dygraph(self): with fluid.dygraph.guard(): @@ -1082,6 +1091,7 @@ class TestCos(TestActivation): def setUp(self): self.op_type = "cos" self.init_dtype() + self.python_api = paddle.cos np.random.seed(1024) x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) @@ -1093,7 +1103,7 @@ class TestCos(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestTan(TestActivation): @@ -1151,6 +1161,7 @@ class TestAcos(TestActivation): def setUp(self): self.op_type = "acos" self.init_dtype() + self.python_api = paddle.acos np.random.seed(1024) x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype) @@ -1162,13 +1173,14 @@ class TestAcos(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestSin(TestActivation, TestParameter): def setUp(self): self.op_type = "sin" self.init_dtype() + self.python_api = paddle.sin np.random.seed(1024) x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) @@ -1180,13 +1192,14 @@ class TestSin(TestActivation, TestParameter): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestAsin(TestActivation): def setUp(self): self.op_type = "asin" self.init_dtype() + self.python_api = paddle.asin np.random.seed(2048) x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype) @@ -1198,13 +1211,14 @@ class TestAsin(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestAcosh(TestActivation): def setUp(self): self.op_type = "acosh" self.init_dtype() + self.python_api = paddle.acosh np.random.seed(1024) x = np.random.uniform(2, 3, [10, 12]).astype(self.dtype) @@ -1216,13 +1230,14 @@ class TestAcosh(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestAsinh(TestActivation): def setUp(self): self.op_type = "asinh" self.init_dtype() + self.python_api = paddle.asinh np.random.seed(1024) x = np.random.uniform(1, 2, [10, 12]).astype(self.dtype) @@ -1234,13 +1249,14 @@ class TestAsinh(TestActivation): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestAtanh(TestActivation): def setUp(self): self.op_type = "atanh" self.init_dtype() + self.python_api = paddle.atanh np.random.seed(400) x = 
         x = np.random.uniform(-0.9, 0.9, [10, 12]).astype(self.dtype)
@@ -1252,7 +1268,7 @@ class TestAtanh(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestRound(TestActivation):
@@ -3195,4 +3211,5 @@ def create_test_act_bf16_class(parent,
 create_test_act_bf16_class(TestRelu)
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py
index 426d5d463f4530e7662279db83fe29826d51d775..f2bf8984cf74e40e8f29f530d0698893aeb5e4e7 100644
--- a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py
@@ -19,6 +19,7 @@ import paddle
 from op_test import OpTest
 import numpy as np
 import os
+from paddle.fluid.framework import _test_eager_guard
 
 
 def output_hist(out):
@@ -32,6 +33,7 @@ def output_hist(out):
 class TestBernoulliOp(OpTest):
     def setUp(self):
         self.op_type = "bernoulli"
+        self.python_api = paddle.bernoulli
         self.inputs = {"X": np.random.uniform(size=(1000, 784))}
         self.attrs = {}
         self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}
@@ -104,8 +106,28 @@ class TestRandomValue(unittest.TestCase):
         expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
         self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
 
+        with _test_eager_guard():
+            x = paddle.to_tensor(x_np, dtype='float64')
+            y = paddle.bernoulli(x).numpy()
+            index0, index1, index2 = np.nonzero(y)
+            self.assertEqual(np.sum(index0), 260028995)
+            self.assertEqual(np.sum(index1), 8582429431)
+            self.assertEqual(np.sum(index2), 8581445798)
+            expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.]
+            self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+
+            x = paddle.to_tensor(x_np, dtype='float32')
+            y = paddle.bernoulli(x).numpy()
+            index0, index1, index2 = np.nonzero(y)
+            self.assertEqual(np.sum(index0), 260092343)
+            self.assertEqual(np.sum(index1), 8583509076)
+            self.assertEqual(np.sum(index2), 8582778540)
+            expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
+            self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+
         paddle.enable_static()
 
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_bincount_op.py b/python/paddle/fluid/tests/unittests/test_bincount_op.py
index 851bf7b01125a3de1d1f442de529f2b9a76e31a9..a87feaf68c46005d93338b20fe695765e0cd01a2 100644
--- a/python/paddle/fluid/tests/unittests/test_bincount_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bincount_op.py
@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 from op_test import OpTest
 
 paddle.enable_static()
@@ -62,6 +63,14 @@ class TestBincountOpAPI(unittest.TestCase):
                 (actual.numpy() == expected).all(),
                 msg='bincount output is wrong, out =' + str(actual.numpy()))
 
+            with _test_eager_guard():
+                inputs = fluid.dygraph.to_variable(inputs_np)
+                actual = paddle.bincount(inputs)
+                expected = np.bincount(inputs)
+                self.assertTrue(
+                    (actual.numpy() == expected).all(),
+                    msg='bincount output is wrong, out =' + str(actual.numpy()))
+
 
 class TestBincountOpError(unittest.TestCase):
     """Test bincount op error."""
diff --git a/python/paddle/fluid/tests/unittests/test_bitwise_op.py b/python/paddle/fluid/tests/unittests/test_bitwise_op.py
index e154876f6939ef48bcc08d0708b0a452864ccfe0..67070602c2c0061e85e1adc9b6afb98d1b3cf6d8 100644
--- a/python/paddle/fluid/tests/unittests/test_bitwise_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bitwise_op.py
@@ -289,7 +289,7 @@ class TestBitwiseNot(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
         pass
diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
index b6656e283ffe77fa00815bf8aca5632621817e03..eada96665f0b8d8c9b53881de552abe724b9f828 100644
--- a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py
@@ -128,132 +128,135 @@ class TestCholeskySolveOp(OpTest):
     def test_check_output(self):
         self.check_output(check_eager=True)
 
-    # def test_check_grad_normal(self):
-    #     self.check_grad(['Y'], 'Out', max_relative_error=0.01, check_eager=True)
-
-# # 3D(broadcast) + 3D, upper=True
-# class TestCholeskySolveOp3(TestCholeskySolveOp):
-#     """
-#     case 3
-#     """
-
-#     def config(self):
-#         self.y_shape = [1, 10, 10]
-#         self.x_shape = [2, 10, 5]
-#         self.upper = True
-#         self.dtype = np.float64
-
-# class TestCholeskySolveAPI(unittest.TestCase):
-#     def setUp(self):
-#         np.random.seed(2021)
-#         self.place = [paddle.CPUPlace()]
-#         # self.place = [paddle.CUDAPlace(0)]
-#         self.dtype = "float64"
-#         self.upper = True
-#         if core.is_compiled_with_cuda():
-#             self.place.append(paddle.CUDAPlace(0))
-
-#     def check_static_result(self, place):
-#         paddle.enable_static()
-#         with fluid.program_guard(fluid.Program(), fluid.Program()):
-#             x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype)
-#             y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype)
-#             z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
-
-#             x_np = np.random.random([10, 2]).astype(self.dtype)
-#             y_np = np.random.random([10, 10]).astype(self.dtype)
-#             if self.upper:
-#                 umat = np.triu(y_np)
-#             else:
-#                 umat = np.tril(y_np)
-#             z_np = cholesky_solution(umat, x_np, upper=self.upper)
-#             z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper)
-
-#             exe = fluid.Executor(place)
-#             fetches = exe.run(fluid.default_main_program(),
-#                               feed={"x": x_np,
-#                                     "y": umat},
-#                               fetch_list=[z])
-#             self.assertTrue(np.allclose(fetches[0], z_np))
-
-#     def test_static(self):
-#         for place in self.place:
-#             self.check_static_result(place=place)
-
-#     def test_dygraph(self):
-#         def run(place):
-#             paddle.disable_static(place)
-#             x_np = np.random.random([20, 2]).astype(self.dtype)
-#             y_np = np.random.random([20, 20]).astype(self.dtype)
-#             z_np = scipy_cholesky_solution(y_np, x_np, upper=self.upper)
-
-#             x = paddle.to_tensor(x_np)
-#             y = paddle.to_tensor(y_np)
-#             z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
-
-#             self.assertTrue(np.allclose(z_np, z.numpy()))
-#             self.assertEqual(z_np.shape, z.numpy().shape)
-#             paddle.enable_static()
-
-#         for idx, place in enumerate(self.place):
-#             run(place)
-
-#     def test_boardcast(self):
-#         def run(place):
-#             paddle.disable_static()
-#             x_np = np.random.random([1, 30, 2]).astype(self.dtype)
-#             y_np = np.random.random([2, 30, 30]).astype(self.dtype)
-#             nx_np = np.concatenate((x_np, x_np), axis=0)
-
-#             z_sci = scipy_cholesky_solution_batch(y_np, nx_np, upper=self.upper)
-
-#             x = paddle.to_tensor(x_np)
-#             y = paddle.to_tensor(y_np)
-#             z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
-#             self.assertEqual(z_sci.shape, z.numpy().shape)
-#             self.assertTrue(np.allclose(z_sci, z.numpy()))
-
-#         for idx, place in enumerate(self.place):
-#             run(place)
-
-# class TestCholeskySolveOpError(unittest.TestCase):
-#     def test_errors(self):
-#         paddle.enable_static()
-#         with program_guard(Program(), Program()):
-#             # The input type of solve_op must be Variable.
-#             x1 = fluid.create_lod_tensor(
-#                 np.array([[-1]]), [[1]], fluid.CPUPlace())
-#             y1 = fluid.create_lod_tensor(
-#                 np.array([[-1]]), [[1]], fluid.CPUPlace())
-#             self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1)
-
-#             # The data type of input must be float32 or float64.
-#             x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
-#             y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
-#             self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2)
-
-#             x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
-#             y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
-#             self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3)
-
-#             x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
-#             y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
-#             self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4)
-
-#             # The number of dimensions of input'X must be >= 2.
-#             x5 = fluid.data(name="x5", shape=[30], dtype="float64")
-#             y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
-#             self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5)
-
-#             # The number of dimensions of input'Y must be >= 2.
-#             x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
-#             y6 = fluid.data(name="y6", shape=[30], dtype="float64")
-#             self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6)
-
-#             # The inner-most 2 dimensions of input'X should be equal to each other
-#             x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
-#             y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
-#             self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7)
+    def test_check_grad_normal(self):
+        self.check_grad(['Y'], 'Out', max_relative_error=0.01, check_eager=True)
+
+
+# 3D(broadcast) + 3D, upper=True
+class TestCholeskySolveOp3(TestCholeskySolveOp):
+    """
+    case 3
+    """
+
+    def config(self):
+        self.y_shape = [1, 10, 10]
+        self.x_shape = [2, 10, 5]
+        self.upper = True
+        self.dtype = np.float64
+
+
+class TestCholeskySolveAPI(unittest.TestCase):
+    def setUp(self):
+        np.random.seed(2021)
+        self.place = [paddle.CPUPlace()]
+        # self.place = [paddle.CUDAPlace(0)]
+        self.dtype = "float64"
+        self.upper = True
+        if core.is_compiled_with_cuda():
+            self.place.append(paddle.CUDAPlace(0))
+
+    def check_static_result(self, place):
+        paddle.enable_static()
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype)
+            y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype)
+            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
+
+            x_np = np.random.random([10, 2]).astype(self.dtype)
+            y_np = np.random.random([10, 10]).astype(self.dtype)
+            if self.upper:
+                umat = np.triu(y_np)
+            else:
+                umat = np.tril(y_np)
+            z_np = cholesky_solution(umat, x_np, upper=self.upper)
+            z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper)
+
+            exe = fluid.Executor(place)
+            fetches = exe.run(fluid.default_main_program(),
+                              feed={"x": x_np,
+                                    "y": umat},
+                              fetch_list=[z])
+            self.assertTrue(np.allclose(fetches[0], z_np))
+
+    def test_static(self):
+        for place in self.place:
+            self.check_static_result(place=place)
+
+    def test_dygraph(self):
+        def run(place):
+            paddle.disable_static(place)
+            x_np = np.random.random([20, 2]).astype(self.dtype)
+            y_np = np.random.random([20, 20]).astype(self.dtype)
+            z_np = scipy_cholesky_solution(y_np, x_np, upper=self.upper)
+
+            x = paddle.to_tensor(x_np)
+            y = paddle.to_tensor(y_np)
+            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
+
+            self.assertTrue(np.allclose(z_np, z.numpy()))
+            self.assertEqual(z_np.shape, z.numpy().shape)
+            paddle.enable_static()
+
+        for idx, place in enumerate(self.place):
+            run(place)
+
+    def test_boardcast(self):
+        def run(place):
+            paddle.disable_static()
+            x_np = np.random.random([1, 30, 2]).astype(self.dtype)
+            y_np = np.random.random([2, 30, 30]).astype(self.dtype)
+            nx_np = np.concatenate((x_np, x_np), axis=0)
+
+            z_sci = scipy_cholesky_solution_batch(y_np, nx_np, upper=self.upper)
+
+            x = paddle.to_tensor(x_np)
+            y = paddle.to_tensor(y_np)
+            z = paddle.linalg.cholesky_solve(x, y, upper=self.upper)
+            self.assertEqual(z_sci.shape, z.numpy().shape)
+            self.assertTrue(np.allclose(z_sci, z.numpy()))
+
+        for idx, place in enumerate(self.place):
+            run(place)
+
+
+class TestCholeskySolveOpError(unittest.TestCase):
+    def test_errors(self):
+        paddle.enable_static()
+        with program_guard(Program(), Program()):
+            # The input type of solve_op must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            y1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1)
+
+            # The data type of input must be float32 or float64.
+            x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
+            y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
+            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2)
+
+            x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
+            y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
+            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3)
+
+            x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
+            y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
+            self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4)
+
+            # The number of dimensions of input'X must be >= 2.
+            x5 = fluid.data(name="x5", shape=[30], dtype="float64")
+            y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
+            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5)
+
+            # The number of dimensions of input'Y must be >= 2.
+            x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
+            y6 = fluid.data(name="y6", shape=[30], dtype="float64")
+            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6)
+
+            # The inner-most 2 dimensions of input'X should be equal to each other
+            x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
+            y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
+            self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 818e15bb319b10a1ffa6850874a26f412422b574..2b87a501cd0bea4d94e5b3f77c903aadc2fea3d0 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -21,6 +21,7 @@ import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestCumsumOp(unittest.TestCase):
@@ -84,6 +85,9 @@ class TestCumsumOp(unittest.TestCase):
     def test_cpu(self):
         paddle.disable_static(paddle.fluid.CPUPlace())
         self.run_cases()
+
+        with _test_eager_guard():
+            self.run_cases()
         paddle.enable_static()
 
         self.run_static()
@@ -107,15 +111,16 @@ class TestCumsumOp(unittest.TestCase):
 class TestSumOp1(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
+        self.python_api = paddle.cumsum
         self.attrs = {'axis': 2}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestSumOp2(OpTest):
@@ -306,4 +311,5 @@ class BadInputTest(unittest.TestCase):
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_eigh_op.py b/python/paddle/fluid/tests/unittests/test_eigh_op.py
index 3e8230e5d0c625f8ae066a74449894609fb54ef6..4d202da3f270932bcdfc6db84b684fef90f4237b 100644
--- a/python/paddle/fluid/tests/unittests/test_eigh_op.py
+++ b/python/paddle/fluid/tests/unittests/test_eigh_op.py
@@ -25,6 +25,7 @@ class TestEighOp(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "eigh"
"eigh" + self.python_api = paddle.linalg.eigh self.init_input() self.init_config() np.random.seed(123) @@ -42,10 +43,10 @@ class TestEighOp(OpTest): self.x_np = np.random.random(self.x_shape).astype(self.x_type) def test_check_output(self): - self.check_output(no_check_set=['Eigenvectors']) + self.check_output(no_check_set=['Eigenvectors'], check_eager=True) def test_grad(self): - self.check_grad(["X"], ["Eigenvalues"]) + self.check_grad(["X"], ["Eigenvalues"], check_eager=True) class TestEighUPLOCase(TestEighOp): @@ -207,4 +208,5 @@ class TestEighAPIError(unittest.TestCase): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_erf_op.py b/python/paddle/fluid/tests/unittests/test_erf_op.py index 964e704c6a2ccbdc96fc281f6e417caf8351cdf7..a6e70f0da4c1b4ec4d7e9fd1be8ea9d7a8b2e07a 100644 --- a/python/paddle/fluid/tests/unittests/test_erf_op.py +++ b/python/paddle/fluid/tests/unittests/test_erf_op.py @@ -27,6 +27,7 @@ import paddle.fluid.dygraph as dg class TestErfOp(OpTest): def setUp(self): self.op_type = "erf" + self.python_api = paddle.erf self.dtype = self._init_dtype() self.x_shape = [11, 17] x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype) @@ -38,10 +39,10 @@ class TestErfOp(OpTest): return "float64" def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=True) class TestErfLayer(unittest.TestCase): @@ -67,4 +68,5 @@ class TestErfLayer(unittest.TestCase): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py index 847a868dd6ca01f32e9e58cf1620aca50f042602..e14c2e38bb8a482b949192c0f44268304bf2b064 100644 --- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py @@ -21,13 +21,13 @@ from op_test import OpTest import paddle import paddle.fluid.core as core -paddle.enable_static() np.random.seed(0) class TestErfinv(OpTest): def setUp(self): self.op_type = "erfinv" + self.python_api = paddle.erfinv self.init_dtype() self.shape = [11, 17] self.x = np.random.uniform(-1, 1, size=self.shape).astype(self.dtype) @@ -42,14 +42,15 @@ class TestErfinv(OpTest): self.dtype = np.float64 def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) def test_check_grad(self): self.check_grad( ['X'], 'Out', user_defined_grads=[self.gradient], - user_defined_grad_outputs=self.grad_out) + user_defined_grad_outputs=self.grad_out, + check_eager=True) class TestErfinvFP32(TestErfinv): @@ -108,4 +109,5 @@ class TestErfinvAPI(unittest.TestCase): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_increment.py b/python/paddle/fluid/tests/unittests/test_increment.py index 38f6a546071b0471522b97ca8ffb5ace14ffbed6..e0383b9298f0c354604bf50f7c9b86cedcb7e8da 100755 --- a/python/paddle/fluid/tests/unittests/test_increment.py +++ b/python/paddle/fluid/tests/unittests/test_increment.py @@ -19,6 +19,7 @@ import unittest import numpy as np import paddle import paddle.fluid as fluid +from paddle.fluid.framework import _test_eager_guard class TestIncrement(unittest.TestCase): @@ -39,6 +40,14 @@ class TestIncrement(unittest.TestCase): output = paddle.tensor.math.increment(input, value=1) self.assertEqual((output.numpy() 
+        with fluid.dygraph.guard():
+            with _test_eager_guard():
+                input = paddle.ones(shape=[1], dtype='int64')
+                expected_result = np.array([2], dtype='int64')
+                output = paddle.tensor.math.increment(input, value=1)
+                self.assertEqual((output.numpy() == expected_result).all(),
+                                 True)
+
 
 class TestInplaceApiWithDataTransform(unittest.TestCase):
     def test_increment(self):
@@ -55,4 +64,5 @@ class TestInplaceApiWithDataTransform(unittest.TestCase):
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_is_empty_op.py b/python/paddle/fluid/tests/unittests/test_is_empty_op.py
index 520e55e9f98d5a673ce17a6633af6ce28141b937..9b9ae71bc76c8c690e87f321195cee632faf869c 100644
--- a/python/paddle/fluid/tests/unittests/test_is_empty_op.py
+++ b/python/paddle/fluid/tests/unittests/test_is_empty_op.py
@@ -23,11 +23,12 @@ import paddle
 class TestEmpty(OpTest):
     def setUp(self):
         self.op_type = "is_empty"
+        self.python_api = paddle.is_empty
         self.inputs = {'X': np.array([1, 2, 3])}
         self.outputs = {'Out': np.array([False])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 class TestNotEmpty(TestEmpty):
@@ -75,4 +76,5 @@ class TestIsEmptyOpDygraph(unittest.TestCase):
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_log_loss_op.py b/python/paddle/fluid/tests/unittests/test_log_loss_op.py
index 0c57c0addf261e6cc794f87e65e5dc5d111e3765..af1b29e1032d16d8d81a3c2f0bb18600960cc664 100644
--- a/python/paddle/fluid/tests/unittests/test_log_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_log_loss_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle
 
 
 def sigmoid_array(x):
@@ -27,6 +28,7 @@ def sigmoid_array(x):
 class TestLogLossOp(OpTest):
     def setUp(self):
         self.op_type = 'log_loss'
+        self.python_api = paddle.nn.functional.log_loss
         samples_num = 100
 
         x = np.random.random((samples_num, 1)).astype("float32")
@@ -44,10 +46,11 @@ class TestLogLossOp(OpTest):
         self.outputs = {'Loss': loss}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['Predicted'], 'Loss', max_relative_error=0.03)
+        self.check_grad(
+            ['Predicted'], 'Loss', max_relative_error=0.03, check_eager=True)
 
 
 class TestLogLossOpError(unittest.TestCase):
@@ -80,4 +83,5 @@ class TestLogLossOpError(unittest.TestCase):
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
index 96823f49d2f08b094c997af4f81a2e725ab85efb..6b334adf6433b678ce9c64e013cfcf2e047e80d6 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
@@ -31,6 +31,7 @@ class TestMatrixPowerOp(OpTest):
     def setUp(self):
         self.op_type = "matrix_power"
         self.config()
+        self.python_api = paddle.linalg.matrix_power
 
         np.random.seed(123)
         mat = np.random.random(self.matrix_shape).astype(self.dtype)
@@ -41,11 +42,15 @@ class TestMatrixPowerOp(OpTest):
         self.attrs = {"n": self.n}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_grad(self):
         self.check_grad(
-            ["X"], "Out", numeric_grad_delta=1e-5, max_relative_error=1e-7)
+            ["X"],
+            "Out",
+            numeric_grad_delta=1e-5,
+            max_relative_error=1e-7,
+            check_eager=True)
 
 
 class TestMatrixPowerOpN1(TestMatrixPowerOp):
diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
index cdb89bb964055d7cd5f87de3cf89eebc1d59ae97..81e8a168d98b3dc24d7b24cca4598edf1c36e2ca 100644
--- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
@@ -20,6 +20,7 @@ import paddle.fluid as fluid
 from paddle.fluid import core
 from op_test import OpTest
 import numpy as np
+from paddle.fluid.framework import _test_eager_guard
 
 
 def sample_output_one_dimension(out, dim):
@@ -46,6 +47,7 @@ class TestMultinomialOp(OpTest):
     def setUp(self):
         paddle.enable_static()
         self.op_type = "multinomial"
+        self.python_api = paddle.multinomial
         self.init_data()
         self.inputs = {"X": self.input_np}
 
@@ -113,6 +115,22 @@ class TestMultinomialApi(unittest.TestCase):
                 sample_prob, prob, rtol=0, atol=0.01),
             "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
 
+    def test_eager(self):
+        # input probability is a vector, and replacement is True
+        paddle.disable_static()
+        with _test_eager_guard():
+            x_numpy = np.random.rand(4)
+            x = paddle.to_tensor(x_numpy)
+            out = paddle.multinomial(x, num_samples=100000, replacement=True)
+
+            sample_prob = sample_output_one_dimension(out.numpy(), 4)
+            prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
+            self.assertTrue(
+                np.allclose(
+                    sample_prob, prob, rtol=0, atol=0.01),
+                "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        paddle.enable_static()
+
     def test_dygraph2(self):
         # input probability is a matrix, and replacement is True
         paddle.disable_static()
@@ -128,6 +146,22 @@ class TestMultinomialApi(unittest.TestCase):
             "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
         paddle.enable_static()
 
+    def test_eager2(self):
+        # input probability is a matrix, and replacement is True
+        paddle.disable_static()
+        with _test_eager_guard():
+            x_numpy = np.random.rand(3, 4)
+            x = paddle.to_tensor(x_numpy)
+            out = paddle.multinomial(x, num_samples=100000, replacement=True)
+
+            sample_prob = sample_output_two_dimension(out.numpy(), [3, 4])
+            prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
+            self.assertTrue(
+                np.allclose(
+                    sample_prob, prob, rtol=0, atol=0.01),
+                "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob))
+        paddle.enable_static()
+
     def test_dygraph3(self):
         # replacement is False. number of samples must be less than number of categories.
paddle.disable_static() @@ -217,4 +251,5 @@ class TestMultinomialError(unittest.TestCase): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_segment_ops.py b/python/paddle/fluid/tests/unittests/test_segment_ops.py index e2aadbedbd07fdc77e9a6d1f6e2740a826032393..971995b875fe2fb0cf7977541e0db35338f684f5 100644 --- a/python/paddle/fluid/tests/unittests/test_segment_ops.py +++ b/python/paddle/fluid/tests/unittests/test_segment_ops.py @@ -93,6 +93,7 @@ class TestSegmentOps(OpTest): self.dtype = np.float64 self.shape = [30, 15] self.attrs = {"pooltype": "SUM"} + self.python_api = paddle.incubate.segment_sum def setUp(self): self.prepare() @@ -105,10 +106,10 @@ class TestSegmentOps(OpTest): self.outputs = {'Out': result.astype(self.dtype)} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(["X"], "Out") + self.check_grad(["X"], "Out", check_eager=False) class TestSegmentSum2(TestSegmentOps): @@ -136,6 +137,7 @@ class TestSegmentMax(TestSegmentOps): super(TestSegmentMax, self).prepare() self.shape = [40, 20] self.attrs = {'pooltype': "MAX"} + # self.python_api = paddle.incubate.segment_max def setUp(self): self.prepare() @@ -148,7 +150,8 @@ class TestSegmentMax(TestSegmentOps): self.outputs = {'Out': result.astype(self.dtype)} def test_check_grad(self): - self.check_grad(["X"], "Out", user_defined_grads=[self.gradient]) + self.check_grad( + ["X"], "Out", user_defined_grads=[self.gradient], check_eager=False) class TestSegmentMax2(TestSegmentMax): @@ -164,6 +167,7 @@ class TestSegmentMin(TestSegmentMax): def prepare(self): super(TestSegmentMin, self).prepare() self.attrs = {'pooltype': "MIN"} + #self.python_api = paddle.incubate.segment_min class TestSegmentMin2(TestSegmentMin): @@ -180,6 +184,7 @@ class TestSegmentMean(TestSegmentOps): super(TestSegmentMean, self).prepare() self.shape = [40, 20] self.attrs = {'pooltype': "MEAN"} + #self.python_api = paddle.incubate.segment_mean def setUp(self): self.prepare() @@ -199,6 +204,7 @@ class TestSegmentMean2(TestSegmentMean): self.dtype = np.float32 self.shape = [30, 20] self.attrs = {'pooltype': "MEAN"} + #self.python_api = paddle.incubate.segment_mean class API_SegmentOpsTest(unittest.TestCase): @@ -259,4 +265,5 @@ class API_SegmentOpsTest(unittest.TestCase): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py index f16198817945ab826004d51f1264a9cbe8fc22a9..98a5cdb8125dd184584be9522648b54725ddac2e 100644 --- a/python/paddle/fluid/tests/unittests/test_selu_op.py +++ b/python/paddle/fluid/tests/unittests/test_selu_op.py @@ -24,6 +24,7 @@ import paddle.fluid as fluid import paddle.nn as nn import paddle.nn.functional as F from paddle.fluid import compiler, Program, program_guard +from paddle.fluid.framework import _test_eager_guard def ref_selu(x, @@ -113,6 +114,15 @@ class TestSeluAPI(unittest.TestCase): out_ref = ref_selu(self.x_np, self.scale, self.alpha) for r in [out1, out2]: self.assertEqual(np.allclose(out_ref, r.numpy()), True) + + with _test_eager_guard(): + x = paddle.to_tensor(self.x_np) + out1 = F.selu(x, self.scale, self.alpha) + selu = paddle.nn.SELU(self.scale, self.alpha) + out2 = selu(x) + out_ref = ref_selu(self.x_np, self.scale, self.alpha) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) paddle.enable_static() def 
test_fluid_api(self): @@ -145,4 +155,5 @@ class TestSeluAPI(unittest.TestCase): if __name__ == "__main__": + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_shard_index_op.py b/python/paddle/fluid/tests/unittests/test_shard_index_op.py index 9ccf1f254a5566bfebce1d18873b76f5961ff65b..83e8f77d9e84806d242845b6998415b409d9107e 100644 --- a/python/paddle/fluid/tests/unittests/test_shard_index_op.py +++ b/python/paddle/fluid/tests/unittests/test_shard_index_op.py @@ -22,10 +22,12 @@ import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework from paddle.fluid.framework import Program, program_guard +import paddle def common_setup(self, index_num, nshards, shard_id, ignore_value): self.op_type = 'shard_index' + self.python_api = paddle.shard_index x_lod = [[i for i in range(10)]] N = sum(x_lod[0]) x = [np.random.randint(0, index_num - 1) for i in range(N)] @@ -54,7 +56,7 @@ class TestShardIndexShardId0Op(OpTest): common_setup(self, 20, 2, 0, -1) def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) class TestShardIndexShardId1Op(OpTest): @@ -62,7 +64,7 @@ class TestShardIndexShardId1Op(OpTest): common_setup(self, 20, 2, 1, -1) def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) class TestShardIndexIgnoreValueOp(OpTest): @@ -70,7 +72,7 @@ class TestShardIndexIgnoreValueOp(OpTest): common_setup(self, 20, 2, 0, -2) def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) class TestShardIndexNotEvenlyDividedOp(OpTest): @@ -78,8 +80,9 @@ class TestShardIndexNotEvenlyDividedOp(OpTest): common_setup(self, 15, 2, 1, -1) def test_check_output(self): - self.check_output() + self.check_output(check_eager=True) if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py index 51751588f7b94447080f80002ceb29dac2429529..775f090c7ad9445fac76b2fcd68241199be4ae74 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py @@ -22,6 +22,7 @@ import paddle.fluid.core as core import unittest from paddle.fluid import compiler, Program, program_guard import paddle.fluid as fluid +import paddle class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): @@ -30,6 +31,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = 64 num_classes = 20 self.inputs = { @@ -49,10 +51,10 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): self.outputs = {'Out': -term1 - term2} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): @@ -61,6 +63,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = 64 num_classes = 20 ignore_index = -1 @@ -83,10 +86,10 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): self.outputs = {'Out': out} def 
test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): @@ -95,6 +98,7 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = 64 num_classes = 20 self.inputs = { @@ -114,15 +118,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest): self.outputs = {'Out': -term1 - term2} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithNorm(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = 64 num_classes = 20 ignore_index = -1 @@ -145,10 +150,10 @@ class TestSigmoidCrossEntropyWithNorm(OpTest): self.outputs = {'Out': out} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): @@ -157,6 +162,7 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = [10, 10] num_classes = 20 self.inputs = { @@ -176,15 +182,16 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest): self.outputs = {'Out': -term1 - term2} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithNorm2(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = [10, 10] num_classes = 20 ignore_index = -1 @@ -207,10 +214,10 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): self.outputs = {'Out': out} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithLogitsOp6(OpTest): @@ -219,6 +226,7 @@ class TestSigmoidCrossEntropyWithLogitsOp6(OpTest): def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" + self.python_api = paddle.fluid.layers.sigmoid_cross_entropy_with_logits batch_size = [10, 10] num_classes = 20 self.inputs = { @@ -238,10 +246,10 @@ class TestSigmoidCrossEntropyWithLogitsOp6(OpTest): self.outputs = {'Out': -term1 - term2} def test_check_output(self): - self.check_output() + self.check_output(check_eager=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_eager=False) class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase): @@ -271,4 +279,5 @@ class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase): if __name__ == '__main__': + paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_size_op.py b/python/paddle/fluid/tests/unittests/test_size_op.py index 
diff --git a/python/paddle/fluid/tests/unittests/test_size_op.py b/python/paddle/fluid/tests/unittests/test_size_op.py
index 09cd35391bae08d3067d80ee6fbb2d2927724192..0e6dc551da0080d29d99c40e5f8133847f358783 100644
--- a/python/paddle/fluid/tests/unittests/test_size_op.py
+++ b/python/paddle/fluid/tests/unittests/test_size_op.py
@@ -22,6 +22,7 @@ from op_test import OpTest
 class TestSizeOp(OpTest):
     def setUp(self):
         self.op_type = "size"
+        self.python_api = paddle.numel
         self.shape = []
         self.config()
         input = np.zeros(self.shape, dtype='bool')
@@ -32,7 +33,7 @@ class TestSizeOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 class TestRank1Tensor(TestSizeOp):
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 64cc2b5add6a6b275c54a1a5565bb277d0562b4e..405c0b1fc1c60cfd23305f0adafcf8740ff77b99 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -85,10 +85,10 @@ class TestSoftmaxOp(OpTest):
                 place,
                 atol=1e-5,
                 check_dygraph=(self.use_mkldnn == False),
-                check_eager=False)
+                check_eager=True)
         else:
             self.check_output(
-                check_dygraph=(self.use_mkldnn == False), check_eager=False)
+                check_dygraph=(self.use_mkldnn == False), check_eager=True)
 
     def test_check_grad(self):
         # TODO(wangzhongpu): support mkldnn op in dygraph mode
diff --git a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py
index b7650efc8c2153e4daf56d97c3a20862249d4413..974b5a5b5143dd041222014c2bab9316b0125af8 100644
--- a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py
+++ b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py
@@ -29,6 +29,7 @@ class TestTakeAlongAxisOp(OpTest):
     def setUp(self):
        self.init_data()
         self.op_type = "take_along_axis"
+        self.python_api = paddle.take_along_axis
         self.xnp = np.random.random(self.x_shape).astype(self.x_type)
         self.target = np.take_along_axis(self.xnp, self.index, self.axis)
         broadcast_shape_list = list(self.x_shape)
@@ -43,10 +44,10 @@ class TestTakeAlongAxisOp(OpTest):
         self.outputs = {'Result': self.target}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['Input'], 'Result')
+        self.check_grad(['Input'], 'Result', check_eager=True)
 
     def init_data(self):
         self.x_type = "float64"
diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index b0f065a26a006ee3553a84938fb5b6b2db7b3172..a01cf590e0cc6cd136335e9a74720a2b460dbb53 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -27,6 +27,7 @@ class TestTileOpRank1(OpTest):
     def setUp(self):
         self.op_type = "tile"
         self.init_data()
+        self.python_api = paddle.tile
 
         self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'repeat_times': self.repeat_times}
@@ -38,10 +39,10 @@ class TestTileOpRank1(OpTest):
         self.repeat_times = [2]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 # with dimension expanding
@@ -85,6 +86,7 @@ class TestTileOpRank4(TestTileOpRank1):
 class TestTileOpRank1_tensor_attr(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.init_data()
         repeat_times_tensor = []
         for index, ele in enumerate(self.repeat_times):
@@ -160,6 +162,7 @@ class TestTileOpRank2_tensor(TestTileOpRank1_tensor):
 class TestTileOpInteger(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {
             'X': np.random.randint(
                 10, size=(4, 4, 5)).astype("int32")
@@ -169,26 +172,28 @@ class TestTileOpInteger(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 # Situation 5: input x is Bool
 class TestTileOpBoolean(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
         self.attrs = {'repeat_times': [2, 1, 4]}
         output = np.tile(self.inputs['X'], (2, 1, 4))
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 # Situation 6: input x is Integer
 class TestTileOpInt64_t(OpTest):
     def setUp(self):
         self.op_type = "tile"
+        self.python_api = paddle.tile
         self.inputs = {
             'X': np.random.randint(
                 10, size=(2, 4, 5)).astype("int64")
@@ -198,7 +203,7 @@ class TestTileOpInt64_t(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
 
 class TestTileError(unittest.TestCase):
@@ -248,4 +253,5 @@ class TestTileAPI(unittest.TestCase):
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_trace_op.py b/python/paddle/fluid/tests/unittests/test_trace_op.py
index 3320b240e56155f8bfc2f6d8e43306f36e651869..315ab9525f3b336e907fdd01d6b04c1f3d20c8d0 100644
--- a/python/paddle/fluid/tests/unittests/test_trace_op.py
+++ b/python/paddle/fluid/tests/unittests/test_trace_op.py
@@ -27,14 +27,15 @@ import paddle
 class TestTraceOp(OpTest):
     def setUp(self):
         self.op_type = "trace"
+        self.python_api = paddle.trace
         self.init_config()
         self.outputs = {'Out': self.target}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['Input'], 'Out')
+        self.check_grad(['Input'], 'Out', check_eager=True)
 
     def init_config(self):
         self.case = np.random.randn(20, 6).astype('float64')
diff --git a/python/paddle/fluid/tests/unittests/test_unfold_op.py b/python/paddle/fluid/tests/unittests/test_unfold_op.py
index 3790539c1c2972c61ef584bad9f3eba81095de21..72ea3cbbd5630ad58b232b2e36551fe9d7d85843 100644
--- a/python/paddle/fluid/tests/unittests/test_unfold_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unfold_op.py
@@ -95,6 +95,7 @@ class TestUnfoldOp(OpTest):
     def setUp(self):
         self.op_type = 'unfold'
         self.set_data()
+        self.python_api = paddle.nn.functional.unfold
 
     def test_check_output(self):
         self.check_output(check_eager=True)
diff --git a/python/paddle/fluid/tests/unittests/test_where_index.py b/python/paddle/fluid/tests/unittests/test_where_index.py
index 1c5705023b87a8adb63d35839b4f5a7cdd8890dc..fa306a544bccfe28ba0e6f9797cfd77166e5bf28 100644
--- a/python/paddle/fluid/tests/unittests/test_where_index.py
+++ b/python/paddle/fluid/tests/unittests/test_where_index.py
@@ -21,15 +21,17 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle
 
 
 class TestWhereIndexOp(OpTest):
     def setUp(self):
         self.op_type = "where_index"
+        self.python_api = paddle.fluid.layers.where
         self.init_config()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def init_config(self):
         self.inputs = {'Condition': np.array([True, False, True]), }
@@ -111,4 +113,5 @@ class TestWhereRaiseError(unittest.TestCase):
 
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py
index 9f577d5ff38024fe9264deec2980ff091996a1d8..cb85ad0b7411c120b2704eb1639889202d77a0de 100644
--- a/python/paddle/incubate/tensor/math.py
+++ b/python/paddle/incubate/tensor/math.py
@@ -15,6 +15,7 @@
 from paddle.fluid.layer_helper import LayerHelper, in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle import _C_ops
+from paddle.fluid.framework import _in_eager_mode
 
 __all__ = []
 
@@ -51,6 +52,8 @@ def segment_sum(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "SUM")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "SUM")
         return out
 
@@ -104,6 +107,9 @@ def segment_mean(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids,
+                                                   "MEAN")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MEAN")
         return out
 
@@ -156,6 +162,8 @@ def segment_min(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "MIN")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MIN")
         return out
 
@@ -208,6 +216,8 @@ def segment_max(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_segment_pool(data, segment_ids, "MAX")[0]
         out, tmp = _C_ops.segment_pool(data, segment_ids, 'pooltype', "MAX")
         return out
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index 5491c41c8305f871dc42ac808f93397114e5e5f9..67ff62355cc6fd8e5f60211edb9b346c76f8b0e0 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -22,7 +22,7 @@ import numpy as np
 
 from ..fluid.data_feeder import check_variable_and_dtype
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
+from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, _in_eager_mode
 import paddle
 from paddle import _C_ops
 
@@ -798,7 +798,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
         total = _varbase_creator(dtype="int32")
 
     topk_out, topk_indices = paddle.topk(input, k=k)
-    if _in_eager_mode:
+    if _in_eager_mode():
         _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
         return _acc
     _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
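NOTE: the metrics.py hunk fixes a truthiness bug rather than a typo: _in_eager_mode is a function, so `if _in_eager_mode:` tests the function object itself and is always true, unconditionally forcing accuracy() down the eager path. A self-contained illustration of why the parentheses matter:

# A function object is always truthy; only the call reflects the actual mode.
def _in_eager_mode_demo():
    return False  # pretend eager mode is disabled

taken = []
if _in_eager_mode_demo:        # bug: tests the function object, always True
    taken.append("eager (always)")
if not _in_eager_mode_demo():  # fix: call it and test the returned mode
    taken.append("legacy dygraph")
assert taken == ["eager (always)", "legacy dygraph"]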
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 86ed0cb390f62591b292d783bd1fa560f6855b60..cef6ba48f83994cfcc47ae408bed1bbe80ddb178 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -22,7 +22,7 @@
 from ...tensor.math import multiply
 import warnings
 
 from ...fluid.layer_helper import LayerHelper
-from ...fluid.framework import convert_np_dtype_to_dtype_
+from ...fluid.framework import convert_np_dtype_to_dtype_, _in_eager_mode
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
 import paddle
 from paddle import _C_ops, in_dynamic_mode
@@ -783,6 +783,8 @@ def selu(x,
             "The alpha must be no less than zero. Received: {}.".format(alpha))
 
     if in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_selu(x, scale, alpha)
         return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)
 
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 1865cf8f5114aee4e8959ef09320f9b7e9c562d4..bc027b19147d7945a2267d122e03d705e25173a5 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -1439,6 +1439,8 @@ def bincount(x, weights=None, minlength=0, name=None):
         raise TypeError("Elements in Input(x) should all be integers")
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_bincount(x, weights, minlength)
         return _C_ops.bincount(x, weights, "minlength", minlength)
 
     helper = LayerHelper('bincount', **locals())
@@ -1748,6 +1750,8 @@ def matrix_power(x, n, name=None):
             # [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_matrix_power(x, n)
         return _C_ops.matrix_power(x, "n", n)
 
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
@@ -2266,6 +2270,8 @@ def eigh(x, UPLO='L', name=None):
 
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_eigh(x, UPLO)
         return _C_ops.eigh(x, 'UPLO', UPLO)
 
     def __check_input(x, UPLO):
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 02cace9f0f8f6f4962ca747ec316c1c5a468366d..a07c1d9c103064f3bda4d4cbc3dbcdc6ff3fd0cf 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -453,8 +453,6 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
         if binary_op:
             return op(x, y)
         else:
-            if _in_eager_mode():
-                return _C_op.final_state_bitewise_not(x)
             return op(x)
 
     check_variable_and_dtype(
@@ -581,7 +579,8 @@ def bitwise_not(x, out=None, name=None):
             res = paddle.bitwise_not(x)
             print(res)  # [4, 0, -2]
     """
-
+    if _in_eager_mode() and out is None:
+        return _C_ops.final_state_bitwise_not(x)
     return _bitwise_op(
         op_name="bitwise_not", x=x, y=None, name=name, out=out, binary_op=False)
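NOTE: in logic.py the eager fast path moves out of the shared _bitwise_op helper (the removed lines also carried the misspelled _C_op.final_state_bitewise_not) into bitwise_not itself, guarded on `out is None` so callers that pass a preallocated out tensor keep the legacy kernel path. A quick usage check; the expected values come from the docstring quoted in the hunk above:

# Minimal usage check for bitwise_not; with out=None the call is eligible
# for the eager fast path, with out= it falls back to _bitwise_op.
import paddle

x = paddle.to_tensor([-5, -1, 1])
res = paddle.bitwise_not(x)
print(res.numpy())  # [ 4  0 -2]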
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 32ccecbc6d9f0282b86f100e1b910667fab41cb2..fc4d882ff001c52836ca756e36438b0f8617c248 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -37,6 +37,7 @@ from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
 import paddle
 from paddle import _C_ops
 from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype
+from paddle.fluid.framework import _in_eager_mode
 
 __all__ = []
 
@@ -947,6 +948,9 @@ def split(x, num_or_sections, axis=0, name=None):
             print(out1.shape)  # [3, 3, 5]
             print(out2.shape)  # [3, 3, 5]
     """
+    if paddle.in_dygraph_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_split(x, num_or_sections, axis)
     return paddle.fluid.layers.split(
         input=x, num_or_sections=num_or_sections, dim=axis, name=name)
 
@@ -1746,6 +1750,8 @@ def tile(x, repeat_times, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_tile(x, repeat_times)
         return _C_ops.tile(x, 'repeat_times', repeat_times)
     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
@@ -2822,6 +2828,8 @@ def take_along_axis(arr, indices, axis):
         broadcast_shape_list[axis] = list(arr.shape)[axis]
         broadcast_shape = tuple(broadcast_shape_list)
         arr = paddle.broadcast_to(arr, broadcast_shape)
+        if _in_eager_mode():
+            return _C_ops.final_state_take_along_axis(arr, indices, axis)
         return _C_ops.take_along_axis(arr, indices, 'Axis', axis)
     check_variable_and_dtype(
         arr, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
@@ -2887,6 +2895,9 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
         if broadcast_shape:
             indices = paddle.broadcast_to(indices, broadcast_shape)
         values = paddle.broadcast_to(values, indices.shape)
+        if _in_eager_mode():
+            return _C_ops.final_state_put_along_axis(arr, indices, values,
+                                                     axis, reduce)
         return _C_ops.put_along_axis(arr, indices, values, "Axis", axis,
                                      "Reduce", reduce)
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index af7b9d43bf89fe519a425c1be2403e85acb37639..8c2dbaa7056c80e1ef4595dd2a717a07178c4466 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -322,6 +322,8 @@ def subtract(x, y, name=None):
     axis = -1
     act = None
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_subtract(x, y)
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -2344,6 +2346,8 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     __check_input(input, offset, axis1, axis2)
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_trace(x, offset, axis1, axis2)
         return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
 
     inputs = {'Input': [x]}
@@ -2566,6 +2570,8 @@ def cumsum(x, axis=None, dtype=None, name=None):
         x = cast(x, dtype)
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_cumsum(x, axis, flatten, False, False)
         if axis is None:
             return _C_ops.cumsum(x, 'flatten', flatten)
         else:
@@ -2816,6 +2822,8 @@ def sign(x, name=None):
           print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_sign(x)
         return _C_ops.sign(x)
 
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
@@ -2897,6 +2905,8 @@ def increment(x, value=1.0, name=None):
 
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_increment(x, value)
         return _C_ops.increment(x, 'step', value)
 
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
@@ -3430,6 +3440,8 @@ def erfinv(x, name=None):
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_erfinv(x)
         return _C_ops.erfinv(x)
 
     helper = LayerHelper('erfinv', **locals())
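NOTE: the manipulation.py and tensor/math.py hunks all apply the same two-level dispatch: first the dynamic-vs-static split, then, inside dynamic mode, an eager check that routes to the generated final-state op. The final-state ops take positional arguments mirroring their api.yaml signatures, while the legacy dygraph ops take attribute name/value pairs. Schematically, with trace as the worked example (a sketch of the control flow, not the full function body):

# Schematic of the two-level dispatch used throughout this patch.
# paddle.in_dynamic_mode(), _in_eager_mode() and the _C_ops entry points
# are Paddle internals; the static-graph branch is elided.
def trace(x, offset=0, axis1=0, axis2=1, name=None):
    if paddle.in_dynamic_mode():
        if _in_eager_mode():
            # final-state op: positional args matching its api.yaml signature
            return _C_ops.final_state_trace(x, offset, axis1, axis2)
        # legacy dygraph op: attribute name/value pairs
        return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    ...  # static-graph path via LayerHelper, unchanged by this patch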
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 660803f9f7475997b19be4635b7e89aa055e9c83..63686e48e1cb7d7f319e8f20e232dd138f9f936c 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -22,6 +22,7 @@ from ..fluid.layers import utils
 import paddle
 from paddle import _C_ops
 from paddle.static import Variable
+from paddle.fluid.framework import _in_eager_mode
 
 __all__ = []
 
@@ -67,6 +68,8 @@ def bernoulli(x, name=None):
 
     """
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_bernoulli(x)
         return _C_ops.bernoulli(x)
 
     check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
@@ -175,6 +178,8 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
         "multinomial op is not supported on ROCM yet.")
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_multinomial(x, num_samples, replacement)
         return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
                                   replacement)
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index a49b0521cbd52079fee80e55c25f2d75b8eaaaef..a30d6f1ab3510275186669dbadd9ab87a463522c 100644
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -23,6 +23,7 @@ from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
 from paddle import _C_ops
 from .logic import logical_not
+from paddle.fluid.framework import _in_eager_mode
 
 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf  #DEFINE_ALIAS
@@ -170,6 +171,9 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0
 
     if paddle.in_dynamic_mode():
+        if _in_eager_mode():
+            return _C_ops.final_state_argmax(x, axis, keepdim, flatten,
+                                             var_dtype)
         out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
         return out
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 7091f770bd492abc3a4de9dfebd850748dc0aadb..6479025e71ca74267647837e56392d9df9acfc33 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -267,18 +267,6 @@
     func : diagonal
   backward : diagonal_grad
 
-
-# softmax
-- api : softmax
-  args : (Tensor x, int axis)
-  output : Tensor
-  infer_meta :
-    func : SoftmaxInferMeta
-  kernel :
-    func : softmax
-  backward : softmax_grad
-
 # # maxout
 # - api : maxout
 #   args : (Tensor x, int groups, int axis)
@@ -298,6 +286,7 @@
     param : [index]
   kernel :
     func : put_along_axis
+    dtype : x
   backward : put_along_axis_grad
 
@@ -310,6 +299,7 @@
     param : [index]
   kernel :
     func : take_along_axis
+    dtype : x
   backward : take_along_axis_grad
 
 # matrix_power
@@ -342,6 +332,7 @@
   kernel :
     func : segment_pool
   backward : segment_pool_grad
+
 # accuracy
 - api : accuracy
@@ -351,6 +342,7 @@
     func : AccuracyInferMeta
   kernel :
     func : accuracy
+    dtype : x
 
 # sin
 - api : sin
@@ -465,19 +457,9 @@
     func : atanh
   backward : atanh_grad
 
-# relu
-- api : relu
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : relu
-  backward : relu_grad
-
 # arg_min # int64 ???? dtype
-- api : arg_min
+- api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
   infer_meta :
@@ -486,7 +468,7 @@
     func : arg_min
 
 # arg_max # int64 ???? dtype
-- api : arg_max
+- api : argmax
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
   infer_meta :
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 67a618a5057eb00521cf38d990133e0fd6af1ec8..ca19c7cd47a68334800325e7b5d767be3d0c5a38 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -57,7 +57,7 @@
 
 # - backward_api : norm_grad
 #   forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
-#   args : (Tensor out_grad, Tensor x, Tensor norm, int axis, float epsilon, bool is_test)
+#   args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
#   output : Tensor(x_grad)
#   infer_meta :
#     func : UnchangedInferMeta
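NOTE: the api.yaml rename from arg_min/arg_max to argmin/argmax is not cosmetic. Judging by every call site in this patch, the generated eager entry point for an api.yaml entry "- api : NAME" is _C_ops.final_state_NAME, so the rename is what produces the final_state_argmax symbol that the corrected argmax() dispatch in search.py calls. A toy restatement of that naming assumption (the helper below is illustrative, not the generator's actual code):

# Naming assumption inferred from the call sites in this patch: the eager
# entry point for an api.yaml entry "- api : NAME" is _C_ops.final_state_NAME.
def final_state_symbol(yaml_api_name):
    return "final_state_" + yaml_api_name

assert final_state_symbol("argmax") == "final_state_argmax"   # after rename
assert final_state_symbol("arg_max") == "final_state_arg_max" # old name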