From b810961df8620bddefead8cbe1fb48797f17bdf8 Mon Sep 17 00:00:00 2001
From: hong <43953930+phlrain@users.noreply.github.com>
Date: Mon, 11 Apr 2022 21:56:01 +0800
Subject: [PATCH] Add dist norm yamls (#41424) (#41615)

* add dist erfinv gumbel softmax

* fix test gumbel softmax op bug

* try to fix gumbel softmax error

* add label smooth backlist
---
 .../fluid/tests/unittests/test_dist_op.py     | 10 ++++--
 .../fluid/tests/unittests/test_erfinv_op.py   |  3 +-
 .../tests/unittests/test_expand_v2_op.py      |  1 +
 .../tests/unittests/test_gumbel_softmax_op.py | 18 +++++++----
 python/paddle/nn/functional/activation.py     |  3 ++
 python/paddle/tensor/linalg.py                |  3 ++
 python/paddle/tensor/math.py                  |  3 ++
 python/paddle/utils/code_gen/api.yaml         |  2 +-
 python/paddle/utils/code_gen/backward.yaml    | 31 +++++++++++++------
 9 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_dist_op.py b/python/paddle/fluid/tests/unittests/test_dist_op.py
index b9b8ea92cb3..ad999c3feae 100644
--- a/python/paddle/fluid/tests/unittests/test_dist_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dist_op.py
@@ -37,6 +37,7 @@ def dist(x, y, p):
 class TestDistOp(OpTest):
     def setUp(self):
         self.op_type = 'dist'
+        self.python_api = paddle.dist
         self.attrs = {}
         self.init_case()
         self.init_data_type()
@@ -106,10 +107,14 @@ class TestDistOp(OpTest):
         return x_grad, y_grad
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(["X", "Y"], "Out", user_defined_grads=self.gradient)
+        self.check_grad(
+            ["X", "Y"],
+            "Out",
+            user_defined_grads=self.gradient,
+            check_eager=True)
 
 
 class TestDistOpCase1(TestDistOp):
@@ -174,4 +179,5 @@ class TestDistAPI(unittest.TestCase):
 
 
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py
index 847a868dd6c..5b5a7c03843 100644
--- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py
@@ -28,6 +28,7 @@ np.random.seed(0)
 class TestErfinv(OpTest):
     def setUp(self):
         self.op_type = "erfinv"
+        self.python_api = paddle.erfinv
         self.init_dtype()
         self.shape = [11, 17]
         self.x = np.random.uniform(-1, 1, size=self.shape).astype(self.dtype)
@@ -42,7 +43,7 @@ class TestErfinv(OpTest):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
         self.check_grad(
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index a204c26c1b8..70b3fda79b5 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -27,6 +27,7 @@ class TestExpandV2OpRank1(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
         self.init_data()
+        self.python_api = paddle.expand
 
         self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'shape': self.shape}
diff --git a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
index e423404d07f..7c706eabd1d 100644
--- a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
@@ -17,6 +17,7 @@ import paddle.fluid.core as core
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 
@@ -177,12 +178,17 @@ class TestGumbelSoftmaxAPI(unittest.TestCase):
         self.assertEqual(out_np.sum(), self.count_expected)
 
         # test dygrapg api
-        paddle.disable_static()
-        x = paddle.to_tensor(self.x)
-        y = paddle.nn.functional.gumbel_softmax(x, hard=True)
-        out_np = np.array(y)
-        self.assertEqual(out_np.sum(), self.count_expected)
-        paddle.enable_static()
+        with paddle.fluid.dygraph.base.guard():
+            x = paddle.to_tensor(self.x)
+            y = paddle.nn.functional.gumbel_softmax(x, hard=True)
+            out_np = np.array(y)
+            self.assertEqual(out_np.sum(), self.count_expected)
+
+        with _test_eager_guard():
+            x = paddle.to_tensor(self.x)
+            y = paddle.nn.functional.gumbel_softmax(x, hard=True)
+            out_np = np.array(y)
+            self.assertEqual(out_np.sum(), self.count_expected)
 
 
 class TestGumbelSoftmaxOpError(unittest.TestCase):
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 551f4c7b29d..5b241a60fb3 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -1522,6 +1522,9 @@ def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
             #  [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]
 
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_gumbel_softmax(x, temperature, hard, axis)
+
     if in_dynamic_mode():
         return _C_ops.gumbel_softmax(x, 'temperature', temperature, 'hard',
                                      hard, 'axis', axis)
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 07a7b503d3b..0c764692f43 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -551,6 +551,9 @@ def dist(x, y, p=2, name=None):
             out = paddle.dist(x, y, float("-inf"))
             print(out) # out = [0.]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_dist(x, y, p)
+
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
     check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
     check_type(p, 'p', (float, int), 'dist')
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 9751892e701..1fa8e9f698f 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -3634,6 +3634,9 @@ def erfinv(x, name=None):
             # out: [0, 0.4769, -inf]
 
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_erfinv( x )
+
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
 
     if paddle.in_dynamic_mode():
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 8e0f39d5c7b..e730643ca11 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -772,7 +772,7 @@
     func : GumbelSoftmaxInferMeta
   kernel :
     func : gumbel_softmax
-  # backward : gumbel_softmax_grad
+  backward : gumbel_softmax_grad
 
 # hard_shrink
 - api : hard_shrink
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index 72113389c13..c057976332c 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -1,13 +1,3 @@
-# - backward_api : gumbel_softmax_grad
-#   forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
-#   args : (Tensor out, Tensor out_grad, int axis)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : GumbelSoftmaxGradInferMeta
-#     param : [out, out_grad, axis]
-#   kernel :
-#     func : gumbel_softmax_grad
-
 - backward_api : abs_grad
   forward : abs (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -517,6 +507,27 @@
     kernel :
       func : gelu_grad
 
+- backward_api : graph_send_recv_grad
+  forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0) -> Tensor(out), Tensor(dst_count)
+  args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param : [x]
+  kernel :
+    func : graph_send_recv_grad
+  optional: out, dst_count
+
+- backward_api : gumbel_softmax_grad
+  forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GumbelSoftmaxGradInferMeta
+    param : [out, out_grad, axis]
+  kernel :
+    func : gumbel_softmax_grad
+
 - backward_api : hard_shrink_grad
   forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float threshold)
--
GitLab