From 489b8a88a1cd10a4d09ec29ffa23b0834d9b3faf Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Mon, 4 Apr 2022 19:30:16 +0800
Subject: [PATCH] [Yaml]add clip yaml (#41337)

* add clip yaml

* import _test_eager_guad

* add default value to scalar

* add clip_grad default value

* fix test failed
---
 .../fluid/tests/unittests/test_clip_op.py  | 10 ++++++++--
 python/paddle/tensor/math.py               | 18 ++++++++++++++++--
 python/paddle/utils/code_gen/api.yaml      | 11 +++++++++++
 python/paddle/utils/code_gen/backward.yaml | 10 ++++++++++
 4 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index 74c5f693a37..f4423ccd029 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -20,11 +20,13 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestClipOp(OpTest):
     def setUp(self):
         self.max_relative_error = 0.006
+        self.python_api = paddle.clip
         self.inputs = {}
         self.initTestCase()
 
@@ -51,12 +53,12 @@ class TestClipOp(OpTest):
 
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
         paddle.disable_static()
 
     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
         paddle.disable_static()
 
     def initTestCase(self):
@@ -228,6 +230,10 @@ class TestClipAPI(unittest.TestCase):
         self.assertTrue(
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
 
+    def test_eager(self):
+        with _test_eager_guard():
+            self.test_clip_dygraph()
+
     def test_errors(self):
         paddle.enable_static()
         x1 = fluid.data(name='x1', shape=[1], dtype="int16")
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d2ed985fb86..e4faa573ffb 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -2290,7 +2290,16 @@ def clip(x, min=None, max=None, name=None):
     min_ = float(np.finfo(np.float32).min)
     max_ = float(np.finfo(np.float32).max)
 
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        if isinstance(min, Variable):
+            min = min.numpy().item(0)
+        if isinstance(max, Variable):
+            max = max.numpy().item(0)
+        min = min_ if min is None else min
+        max = max_ if max is None else max
+        return _C_ops.final_state_clip(x, min, max)
+
+    if _in_legacy_dygraph():
         if isinstance(min, Variable):
             min = min.numpy().item(0)
         if isinstance(max, Variable):
@@ -2350,7 +2359,12 @@ def clip_(x, min=None, max=None, name=None):
         max = max.numpy().item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max
-    return _C_ops.clip_(x, "min", min, "max", max)
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_clip_(x, min, max)
+
+    if _in_legacy_dygraph():
+        return _C_ops.clip_(x, "min", min, "max", max)
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 4c17644792f..08cf04f6928 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -312,6 +312,17 @@
     func : cholesky_solve
   backward : cholesky_solve_grad
 
+- api : clip
+  args : (Tensor x, Scalar(float) min, Scalar(float) max)
+  output : Tensor(out)
+  inplace : (x -> out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip
+  backward : clip_grad
+
 - api : concat
   args : (Tensor[] x, Scalar(int64_t) axis)
   output : Tensor
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index da60dae4316..570e64dcd5e 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -179,6 +179,16 @@
   kernel :
     func : cholesky_solve_grad
 
+- backward_api : clip_grad
+  forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip_grad
+
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad, Scalar axis = 0)
-- 
GitLab
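A minimal usage sketch of the eager-mode path this patch wires up, assuming a Paddle build that contains the change (paddle.clip then dispatches to the final-state clip op; _test_eager_guard is the same helper imported in the test above):

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    data = np.random.random([4, 5]).astype('float32')

    # Under the eager guard, paddle.clip takes the in_dygraph_mode() branch
    # added in math.py and calls _C_ops.final_state_clip(x, min, max).
    with _test_eager_guard():
        x = paddle.to_tensor(data)
        out = paddle.clip(x, min=0.2, max=0.8)
        print(out.numpy())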