From 47bf4397b7fb5fb296db9a5d8670195921ea3790 Mon Sep 17 00:00:00 2001
From: phlrain
Date: Sat, 19 Mar 2022 17:05:07 +0000
Subject: [PATCH] fix bug

---
 .../fluid/layers/layer_function_generator.py |  4 +--
 python/paddle/fluid/layers/loss.py           |  6 ++---
 python/paddle/fluid/layers/nn.py             |  4 +--
 .../fluid/tests/unittests/test_cumsum_op.py  |  4 +--
 .../fluid/tests/unittests/test_eigh_op.py    |  4 +--
 .../fluid/tests/unittests/test_erfinv_op.py  |  4 +--
 .../tests/unittests/test_imperative_gan.py   |  1 +
 .../unittests/test_imperative_optimizer.py   |  1 +
 .../unittests/test_inplace_eager_fluid.py    |  1 -
 python/paddle/tensor/math.py                 |  6 ++---
 python/paddle/utils/code_gen/api.yaml        | 26 +++++++++----------
 11 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py
index bf10ee5c377..fff1a2270a5 100755
--- a/python/paddle/fluid/layers/layer_function_generator.py
+++ b/python/paddle/fluid/layers/layer_function_generator.py
@@ -259,8 +259,8 @@ def generate_activation_fn(op_type):
     def func(x, name=None):
         if in_dygraph_mode():
             if _in_eager_mode():
-                op = getattr(_C_ops, "final_state_" + op_type)
-                if op:
+                if hasattr(_C_ops, "final_state_" + op_type):
+                    op = getattr(_C_ops, "final_state_" + op_type)
                     return op(x)
             op = getattr(_C_ops, op_type)
             return op(x)
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index f1bc2a35262..a7d0c6ea34f 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -1458,9 +1458,9 @@ def sigmoid_cross_entropy_with_logits(x,
                                                   ignore_index=-1, normalize=True)
             print(loss)
     """
-    if in_dygraph_mode() and _in_eager_mode():
-        return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
-            x, label, normalize, ignore_index)
+    # if in_dygraph_mode() and _in_eager_mode():
+    #     return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+    #         x, label, normalize, ignore_index)
 
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'sigmoid_cross_entropy_with_logits')
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 85a119ffd24..350c1ceb13e 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13329,8 +13329,8 @@ def log_loss(input, label, epsilon=1e-4, name=None):
           prob = paddle.randn((10,1))
          cost = F.log_loss(input=prob, label=label)
     """
-    if in_dygraph_mode() and _in_eager_mode():
-        return _C_ops.final_state_log_loss(input, label, epsilon)
+    # if in_dygraph_mode() and _in_eager_mode():
+    #     return _C_ops.final_state_log_loss(input, label, epsilon)
     helper = LayerHelper('log_loss', **locals())
     check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 2b87a501cd0..d1f6c8f9800 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -117,10 +117,10 @@ class TestSumOp1(OpTest):
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_eager=False)
 
 
 class TestSumOp2(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_eigh_op.py b/python/paddle/fluid/tests/unittests/test_eigh_op.py
index 4d202da3f27..c0e6ac9cf62 100644
--- a/python/paddle/fluid/tests/unittests/test_eigh_op.py
+++ b/python/paddle/fluid/tests/unittests/test_eigh_op.py
@@ -43,10 +43,10 @@ class TestEighOp(OpTest):
         self.x_np = np.random.random(self.x_shape).astype(self.x_type)
 
     def test_check_output(self):
-        self.check_output(no_check_set=['Eigenvectors'], check_eager=True)
+        self.check_output(no_check_set=['Eigenvectors'], check_eager=False)
 
     def test_grad(self):
-        self.check_grad(["X"], ["Eigenvalues"], check_eager=True)
+        self.check_grad(["X"], ["Eigenvalues"], check_eager=False)
 
 
 class TestEighUPLOCase(TestEighOp):
diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py
index e14c2e38bb8..87eb740f041 100644
--- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py
@@ -42,7 +42,7 @@ class TestErfinv(OpTest):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)
 
     def test_check_grad(self):
         self.check_grad(
@@ -50,7 +50,7 @@
             'Out',
             user_defined_grads=[self.gradient],
             user_defined_grad_outputs=self.grad_out,
-            check_eager=True)
+            check_eager=False)
 
 
 class TestErfinvFP32(TestErfinv):
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
index 39b7f941c4b..ee41b795c62 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -235,4 +235,5 @@ class TestDygraphGAN(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
index 3e4d1046d1f..684a1791882 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -876,4 +876,5 @@ class TestImperativeOptimizerList(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_eager_fluid.py b/python/paddle/fluid/tests/unittests/test_inplace_eager_fluid.py
index a434c562000..6736783d1bc 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_eager_fluid.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_eager_fluid.py
@@ -160,7 +160,6 @@ class TestDygraphInplace(unittest.TestCase):
             var_a.stop_gradient = False
 
             var_b = var_a**2
-
             var_c = self.non_inplace_api_processing(
                 var_b)  # var_b is modified inplace before using it
 
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 8c2dbaa7056..380e840664f 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -322,8 +322,8 @@ def subtract(x, y, name=None):
     axis = -1
     act = None
     if paddle.in_dynamic_mode():
-        if _in_eager_mode():
-            return _C_ops.final_state_subtract( x, y)
+        # if _in_eager_mode():
+        #     return _C_ops.final_state_subtract( x, y)
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -2822,7 +2822,7 @@ def sign(x, name=None):
         print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
     if paddle.in_dynamic_mode():
-        if _in_eager_model():
+        if _in_eager_mode():
             return _C_op.final_state_sign(x)
         return _C_ops.sign(x)
 
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index f2219de959c..058572feaab 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -296,7 +296,7 @@
     param : [index]
   kernel :
     func : put_along_axis
-    dtype : x
+    data_type : x
   backward : put_along_axis_grad
 
@@ -309,7 +309,7 @@
     param : [index]
   kernel :
     func : take_along_axis
-    dtype : x
+    data_type : x
   backward : take_along_axis_grad
 
 # matrix_power
@@ -597,7 +597,7 @@
   backward : hard_sigmoid_grad
 
-# arg_min # int64 ???? dtype
+# arg_min # int64 dtype
 - api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
@@ -606,7 +606,7 @@
   kernel :
     func : arg_min
 
-# arg_max # int64 ???? dtype
+# arg_max # int64 dtype
 - api : argmax
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
@@ -757,7 +757,7 @@
     func : cumsum
 
 # # depthwise_conv2d
-# # dropout ?? optional, intermediate
+# # dropout optional, intermediate
 # - api : dropout
 #   args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
 #   output : Tensor(out), Tensor(mask)
@@ -849,7 +849,7 @@
 #     func : graph_send_recv
 #   backward : graph_send_recv_grad
 
-# # histogram int64 ???
+# # histogram int64
 # - api : histogram
 #   args : (Tensor x, int64 bins, int min, int max)
 #   output : Tensor
@@ -876,7 +876,7 @@
   kernel :
     func : is_empty
 
-# # isinf selected rows??? involk
+# # isinf selected rows involk
 # - api : isinf
 #   args : (Tensor x)
 #   output : Tensor
@@ -885,7 +885,7 @@
 #   kernel :
 #     func : isinf
 
-# # isnan selected rows??? involk
+# # isnan selected rows involk
 # - api : isnan
 #   args : (Tensor x)
 #   output : Tensor
@@ -894,7 +894,7 @@
 #   kernel :
 #     func : isnan
 
-# # isfinite selected rows??? involk
+# # isfinite selected rows involk
 # - api : isfinite
 #   args : (Tensor x)
 #   output : Tensor
@@ -903,7 +903,7 @@
 #   kernel :
 #     func : isfinite
 
-# label_smooth ?? optional
+# label_smooth optional
 # - api : label_smooth
 #   args : (Tensor label, Tensor prior_dist, float epsilon)
 #   output : Tensor
@@ -915,7 +915,7 @@
 #   backward : label_smooth_grad
 #   optional : prior_dist
 
-# linspace ???? start stop number
+# linspace start stop number
 # - api : linspace
 #   args : (Tensor start, Tensor stop, Tensor number, DataType dtype=DataType::FLOAT32)
 #   output : Tensor
@@ -1042,7 +1042,7 @@
 #     func : poisson
 #   backward : poisson_grad
 
-# psroi_pool ?? optional
+# psroi_pool optional
 # - api : psroi_pool
 #   args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
 #   output : Tensor
@@ -1103,7 +1103,7 @@
 # # set_value None api
 # # sgd # need invoke
 
-# # shape ??? selcted rows
+# # shape selcted rows
 
 # shard_index
 - api : shard_index
-- 
GitLab
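
Note on the substantive fix: the layer_function_generator.py hunk replaces a
getattr-then-truthiness check with a hasattr guard. getattr(obj, name) with no
default raises AttributeError when the attribute is missing, so the old
`if op:` fallback was unreachable for ops with no registered final-state
kernel; probing with hasattr first lets the wrapper fall back to the legacy op.
A minimal standalone sketch of the pattern (the `_C_ops` namespace and `relu`
entry below are stand-ins for illustration, not Paddle's real op table):

    import types

    # Hypothetical op table: only a legacy `relu` kernel is registered;
    # there is no `final_state_relu` attribute.
    _C_ops = types.SimpleNamespace(relu=lambda x: x if x > 0 else 0.0)

    def call_op(op_type, x):
        # Probe before fetching: getattr on a missing name raises
        # AttributeError rather than returning something falsy.
        if hasattr(_C_ops, "final_state_" + op_type):
            op = getattr(_C_ops, "final_state_" + op_type)
            return op(x)
        # Fall back to the legacy kernel.
        op = getattr(_C_ops, op_type)
        return op(x)

    print(call_op("relu", -3.0))  # 0.0, served by the legacy kernel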