diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py
index 74c5f693a37f1f0a480a14465e87be97cabb8f9f..f4423ccd0294cc8cdf88a20b75c9f6f32dddd3db 100644
--- a/python/paddle/fluid/tests/unittests/test_clip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -20,11 +20,13 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestClipOp(OpTest):
     def setUp(self):
         self.max_relative_error = 0.006
+        self.python_api = paddle.clip
         self.inputs = {}
         self.initTestCase()
@@ -51,12 +53,12 @@ class TestClipOp(OpTest):
 
     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
         paddle.disable_static()
 
     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
         paddle.disable_static()
 
     def initTestCase(self):
@@ -228,6 +230,10 @@ class TestClipAPI(unittest.TestCase):
         self.assertTrue(
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
 
+    def test_eager(self):
+        with _test_eager_guard():
+            self.test_clip_dygraph()
+
     def test_errors(self):
         paddle.enable_static()
         x1 = fluid.data(name='x1', shape=[1], dtype="int16")
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d2ed985fb86516e5f78c953c769fc53f32c47de9..e4faa573ffb265f00cf2c1961ecd56bdfdb9ce35 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -2290,7 +2290,16 @@ def clip(x, min=None, max=None, name=None):
         min_ = float(np.finfo(np.float32).min)
         max_ = float(np.finfo(np.float32).max)
 
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        if isinstance(min, Variable):
+            min = min.numpy().item(0)
+        if isinstance(max, Variable):
+            max = max.numpy().item(0)
+        min = min_ if min is None else min
+        max = max_ if max is None else max
+        return _C_ops.final_state_clip(x, min, max)
+
+    if _in_legacy_dygraph():
         if isinstance(min, Variable):
             min = min.numpy().item(0)
         if isinstance(max, Variable):
@@ -2350,7 +2359,12 @@ def clip_(x, min=None, max=None, name=None):
             max = max.numpy().item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max
-    return _C_ops.clip_(x, "min", min, "max", max)
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_clip_(x, min, max)
+
+    if _in_legacy_dygraph():
+        return _C_ops.clip_(x, "min", min, "max", max)
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 4c17644792fbdeee9e19559d6f50b801be6303ef..08cf04f692806d720325bae56aaa127b49c071da 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -312,6 +312,17 @@
     func : cholesky_solve
   backward : cholesky_solve_grad
 
+- api : clip
+  args : (Tensor x, Scalar(float) min, Scalar(float) max)
+  output : Tensor(out)
+  inplace : (x -> out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip
+  backward : clip_grad
+
 - api : concat
   args : (Tensor[] x, Scalar(int64_t) axis)
   output : Tensor
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index da60dae43169542909a4d108d95630eeaf3aa635..570e64dcd5e12eeedf199398becbf0503ba0d89b 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -179,6 +179,16 @@
   kernel :
     func : cholesky_solve_grad
 
+- backward_api : clip_grad
+  forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip_grad
+
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad, Scalar axis = 0)
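For context only (not part of the patch): a minimal sketch of what this change enables, assuming a Paddle build from this branch where `paddle.fluid.framework._test_eager_guard` is importable. Under the guard, `paddle.clip` takes the new `in_dygraph_mode()` branch and dispatches to `_C_ops.final_state_clip`, mirroring the `TestClipAPI.test_eager` case added above.

```python
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard

# Exercise clip under the eager (final-state) dygraph mode.
with _test_eager_guard():
    x = paddle.to_tensor(np.array([0.1, 0.5, 0.9], dtype=np.float32))
    out = paddle.clip(x, min=0.2, max=0.8)  # routes to final_state_clip
    print(out.numpy())  # expected: [0.2 0.5 0.8]
```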