Unverified commit 489b8a88, authored by chentianyu03, committed by GitHub

[Yaml]add clip yaml (#41337)

* add clip yaml

* import _test_eager_guard

* add default value to scalar

* add clip_grad default value

* fix failing test
Parent: fa250aa1
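Reviewer note: in short, this commit wires paddle.clip (and its inplace variant clip_) into the new eager-mode final-state op path via the yaml code generator, and extends the unit tests to cover both execution paths. A minimal usage sketch of the public API being touched (behavior as described by the diff below; not part of the commit):

    import paddle

    x = paddle.to_tensor([-1.0, 0.5, 3.0])
    # Scalar bounds:
    print(paddle.clip(x, min=0.0, max=1.0))   # [0.0, 0.5, 1.0]
    # Tensor bounds are accepted too; the diff converts them to Python
    # scalars via .numpy().item(0) before dispatching to the kernel.
    print(paddle.clip(x, min=paddle.to_tensor(0.0), max=paddle.to_tensor(1.0)))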
@@ -20,11 +20,13 @@
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard


 class TestClipOp(OpTest):
     def setUp(self):
         self.max_relative_error = 0.006
+        self.python_api = paddle.clip
         self.inputs = {}
         self.initTestCase()
@@ -51,12 +53,12 @@ class TestClipOp(OpTest):

     def test_check_output(self):
         paddle.enable_static()
-        self.check_output()
+        self.check_output(check_eager=True)
         paddle.disable_static()

     def test_check_grad_normal(self):
         paddle.enable_static()
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
         paddle.disable_static()

     def initTestCase(self):
@@ -228,6 +230,10 @@ class TestClipAPI(unittest.TestCase):
         self.assertTrue(
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))

+    def test_eager(self):
+        with _test_eager_guard():
+            self.test_clip_dygraph()
+
     def test_errors(self):
         paddle.enable_static()
         x1 = fluid.data(name='x1', shape=[1], dtype="int16")
......
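Reviewer note: the test changes above rerun the existing dygraph test body under eager mode. A self-contained sketch of that pattern (names taken from the diff; check_clip is a hypothetical stand-in for test_clip_dygraph):

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    def check_clip():  # hypothetical stand-in for test_clip_dygraph
        x = paddle.to_tensor([0.1, 0.9, 1.5])
        out = paddle.clip(x, min=0.2, max=1.0)
        assert float(out.min()) >= 0.2 and float(out.max()) <= 1.0

    check_clip()                 # legacy dygraph path
    with _test_eager_guard():    # temporarily enables eager execution
        check_clip()             # same assertions on the final-state path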
@@ -2290,7 +2290,16 @@ def clip(x, min=None, max=None, name=None):
     min_ = float(np.finfo(np.float32).min)
     max_ = float(np.finfo(np.float32).max)

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        if isinstance(min, Variable):
+            min = min.numpy().item(0)
+        if isinstance(max, Variable):
+            max = max.numpy().item(0)
+        min = min_ if min is None else min
+        max = max_ if max is None else max
+        return _C_ops.final_state_clip(x, min, max)
+
+    if _in_legacy_dygraph():
         if isinstance(min, Variable):
             min = min.numpy().item(0)
         if isinstance(max, Variable):
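Reviewer note: the min_/max_ fallbacks in this hunk are the float32 limits, so an omitted bound is effectively unbounded. For reference:

    import numpy as np

    min_ = float(np.finfo(np.float32).min)   # -3.4028234663852886e+38
    max_ = float(np.finfo(np.float32).max)   #  3.4028234663852886e+38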
@@ -2350,7 +2359,12 @@ def clip_(x, min=None, max=None, name=None):
         max = max.numpy().item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max
-    return _C_ops.clip_(x, "min", min, "max", max)
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_clip_(x, min, max)
+
+    if _in_legacy_dygraph():
+        return _C_ops.clip_(x, "min", min, "max", max)
......
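Reviewer note: clip and clip_ now share the same two-branch dispatch: in_dygraph_mode() takes the generated final-state op, while _in_legacy_dygraph() keeps the old attribute-style _C_ops call. A sketch of the inplace contract exercised here, assuming the Tensor.clip_ method as the public entry point for clip_:

    import paddle

    x = paddle.to_tensor([-2.0, 0.5, 9.0])
    x.clip_(min=0.0, max=1.0)   # mutates x in place
    print(x)                    # [0.0, 0.5, 1.0]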
@@ -312,6 +312,17 @@
     func : cholesky_solve
   backward : cholesky_solve_grad

+- api : clip
+  args : (Tensor x, Scalar(float) min, Scalar(float) max)
+  output : Tensor(out)
+  inplace : (x -> out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip
+  backward : clip_grad
+
 - api : concat
   args : (Tensor[] x, Scalar(int64_t) axis)
   output : Tensor
......
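Reviewer note: this yaml entry is what makes the code generator emit the final_state_clip / final_state_clip_ entry points called in math.py above; Scalar(float) lets min and max arrive as either Python floats or 0-d values. A hedged sketch of invoking the generated op directly, using only names that appear in this diff (internal API, not a stable interface):

    import paddle
    from paddle import _C_ops
    from paddle.fluid.framework import _test_eager_guard

    x = paddle.to_tensor([0.3, 1.7])
    with _test_eager_guard():                    # final-state ops need eager mode
        out = _C_ops.final_state_clip(x, 0.5, 1.0)   # (x, min, max)
    print(out)   # [0.5, 1.0]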
@@ -179,6 +179,16 @@
   kernel :
     func : cholesky_solve_grad

+- backward_api : clip_grad
+  forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : clip_grad
+
 - backward_api : concat_grad
   forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
   args : (Tensor[] x, Tensor out_grad, Scalar axis = 0)
......
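Reviewer note: clip_grad needs only x to infer its output shape (UnchangedInferMeta on [x]), and the Scalar defaults min = 0., max = 0. are the default values mentioned in the commit message. A quick behavioral check of the backward being registered, as a sketch: clip's gradient is 1 where the input lies inside the bounds and 0 where it was clipped.

    import paddle

    x = paddle.to_tensor([-1.0, 0.5, 2.0], stop_gradient=False)
    y = paddle.clip(x, min=0.0, max=1.0)
    y.sum().backward()
    print(x.grad)   # [0., 1., 0.]: gradient flows only where min < x < max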