Unverified commit 3152f3fb, authored by xiongkun, committed by GitHub

[Yaml] add yaml for gather op and elementwise_mod op. (#41348)

* gather op

* add mod
Parent fd591ecb
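In brief, this commit registers final-state (eager-mode) YAML definitions for `gather` and `elementwise_mod` (exposed as `paddle.remainder`), switches the Python dispatch for `gather` and `pow` to try the eager path first, and enables eager checking in the affected op tests. A minimal usage sketch of the two ops under eager mode (the tensor values here are illustrative, not taken from the commit):

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.arange(12, dtype="float32").reshape(3, 4))
idx = paddle.to_tensor([0, 2])

# Both calls now route to the generated final-state kernels in eager mode.
gathered = paddle.gather(x, idx, axis=0)                   # rows 0 and 2
rem = paddle.remainder(x + 1.0, paddle.full_like(x, 5.0))  # elementwise_mod
print(gathered.shape, rem.shape)
```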
@@ -2326,7 +2326,7 @@ class TestPow(TestActivation):
     def setUp(self):
         self.op_type = "pow"
         self.python_api = paddle.pow
-        self.check_eager = False
+        self.check_eager = True
         self.init_dtype()
         np.random.seed(1024)
@@ -2337,6 +2337,9 @@ class TestPow(TestActivation):
         self.attrs = {'factor': 3.0}
         self.outputs = {'Out': out}

+    def test_check_output(self):
+        self.check_output(check_eager=self.check_eager)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
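With the flag flipped, `TestPow` now exercises the eager path. What the test verifies can be sketched with NumPy as the reference (the 3.0 factor mirrors the `attrs` dict above; shapes and values are illustrative):

```python
import numpy as np
import paddle

x = np.random.uniform(1, 2, [11, 17]).astype("float32")
ref = np.power(x, 3.0)                       # attrs = {'factor': 3.0}
out = paddle.pow(paddle.to_tensor(x), 3.0)   # now also checked in eager mode
assert np.allclose(out.numpy(), ref, rtol=1e-5)
```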
......
@@ -29,6 +29,7 @@ class TestElementwiseModOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mod"
         self.python_api = paddle.remainder
+        self.axis = -1
         self.init_dtype()
         self.init_input_output()
@@ -43,7 +44,10 @@ class TestElementwiseModOp(OpTest):
         self.outputs = {'Out': self.out}

     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)

     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -76,7 +80,10 @@ class TestElementwiseModOpFloat(TestElementwiseModOp):
         self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)

     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)

 class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
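The `axis` gate in both overrides suggests the final-state modulo kernel only covers the default broadcast case, so eager checking is skipped when an explicit `axis` is set (an inference from the diff, not stated in the commit). What the float variant asserts, in miniature (NumPy as the reference; value ranges are illustrative):

```python
import numpy as np
import paddle

x = np.random.uniform(-1000, 1000, [10, 10]).astype("float32")
y = np.random.uniform(1, 100, [10, 10]).astype("float32")
ref = np.fmod(y + np.fmod(x, y), y)   # Python-style modulo, as in the test
out = paddle.remainder(paddle.to_tensor(x), paddle.to_tensor(y))
assert np.allclose(out.numpy(), ref, rtol=1e-4, atol=1e-4)
```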
......
@@ -43,10 +43,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)

     def config(self):
         """
@@ -136,10 +136,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)

     def config(self):
         """
@@ -165,10 +165,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)

     def config(self):
         """
......
@@ -1391,9 +1391,9 @@ def gather(x, index, axis=None, name=None):
     if axis is None:
         axis = 0
-    #if in_dygraph_mode():
-        #return _C_ops.final_state_gather(x, index, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_gather(x, index, axis)
+    if _in_legacy_dygraph():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
         return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
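Read as control flow, the rewrite splits the old `_non_static_mode()` umbrella into two explicit probes, so the new eager engine gets first crack and legacy dygraph remains a fallback. Condensed below as a sketch (names exactly as in the diff; the trailing static-graph branch lies outside this hunk):

```python
def gather_eager_dispatch(x, index, axis):
    if in_dygraph_mode():        # new eager engine: generated final-state kernel
        return _C_ops.final_state_gather(x, index, axis)
    if _in_legacy_dygraph():     # old dygraph: legacy C++ op with attrs
        axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
        return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
    # neither guard hit -> static-graph program construction (not shown here)
```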
......
@@ -150,17 +150,15 @@ def pow(x, y, name=None):
     """
     # in dynamic graph mode
-    #if in_dygraph_mode():
-        #if isinstance(y, (int, float)):
-            #return _C_ops.final_state_pow(x, y)
-        #elif isinstance(y, (paddle.Tensor, Variable)):
-            #return _elementwise_op_in_dygraph(
-                #x, y, axis=-1, act=None, op_name='elementwise_pow')
-        #else:
-            #raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
-    #if _in_legacy_dygraph():
-    if _non_static_mode():
+    if in_dygraph_mode():
+        if isinstance(y, (int, float)):
+            return _C_ops.final_state_pow(x, y)
+        elif isinstance(y, (paddle.Tensor, Variable)):
+            return _elementwise_op_in_dygraph(
+                x, y, axis=-1, act=None, op_name='elementwise_pow')
+        else:
+            raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
+    if _in_legacy_dygraph():
         if isinstance(y, (int, float)):
             return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
@@ -169,7 +167,6 @@ def pow(x, y, name=None):
         else:
             raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
     # in static graph mode
-    else:
     if isinstance(y, (int, float)):
         helper = LayerHelper('pow', **locals())
         inputs = {'X': x}
@@ -192,6 +189,7 @@ OP_NAMEMAPPING = {
     'elementwise_min': 'final_state_minimum',
     'elementwise_pow': 'final_state_elementwise_pow',
     'elementwise_floordiv': 'final_state_floor_divide',
+    'elementwise_mod': 'final_state_modulo',
 }

 @dygraph_only
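The new `elementwise_mod` entry extends the legacy-to-final-state name map. A hedged sketch of how such a mapping is presumably consumed by the dygraph math helpers (the real lookup lives elsewhere in this module; the `getattr` dispatch here is an assumption, not code from the commit):

```python
from paddle import _C_ops

OP_NAMEMAPPING = {'elementwise_mod': 'final_state_modulo'}  # excerpt from above

def _final_state_call(op_name, x, y):
    # Assumed consumption pattern: translate the legacy op name, then look
    # up the generated eager binding on _C_ops by its final-state name.
    final_name = OP_NAMEMAPPING[op_name]
    return getattr(_C_ops, final_name)(x, y)
```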
......
@@ -632,6 +632,16 @@
     data_type : dtype > x
     backend : place > x

+- api : gather
+  args : (Tensor x, Tensor index, Scalar axis=0)
+  output : Tensor(out)
+  infer_meta :
+    func : GatherInferMeta
+  kernel :
+    func : gather
+    data_type: x
+  backward : gather_grad
+
 - api : gather_nd
   args : (Tensor x, Tensor index)
   output : Tensor
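Each YAML entry drives code generation for the final-state API: `args` fixes the generated signature, `infer_meta` names the shape/dtype inference function, `kernel` binds the PHI kernel (`data_type: x` selects the kernel's dtype from `x`), and `backward` links the grad op. After generation, the eager binding should be callable roughly as sketched below (signature inferred from the `args` line and from the `gather()` dispatch above, not copied from generated code):

```python
import paddle
from paddle import _C_ops

x = paddle.rand([4, 3])
index = paddle.to_tensor([0, 2])
out = _C_ops.final_state_gather(x, index, 0)  # (Tensor x, Tensor index, Scalar axis)
```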
@@ -1220,6 +1230,16 @@
     func : pool3d
   backward : pool3d_grad

+- api : pow
+  args : (Tensor x, Scalar s)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow
+  backward : pow_grad
+
 - api : prelu
   args : (Tensor x, Tensor alpha, str data_format, str mode)
   output : Tensor(out)
......
@@ -389,6 +389,17 @@
   kernel :
     func : frobenius_norm_grad

+- backward_api : gather_grad
+  forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    data_type: x
+    func : gather_grad
+
 - backward_api : gather_nd_grad
   forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad)
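The `gather_grad` entry reuses the forward's `x` and `index` plus the incoming `out_grad`; `UnchangedInferMeta` with `param: [x]` simply gives `x_grad` the shape of `x`. Numerically the backward is a scatter-accumulate, sketched here with NumPy for the `axis=0` case (a reference sketch only; the PHI kernel itself is not part of this diff):

```python
import numpy as np

def gather_grad_ref(x_shape, index, out_grad):
    # Rows of out_grad flow back to the rows of x they were gathered from;
    # duplicate indices must accumulate rather than overwrite.
    x_grad = np.zeros(x_shape, dtype=out_grad.dtype)
    np.add.at(x_grad, index, out_grad)
    return x_grad
```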
@@ -803,6 +814,16 @@
   kernel :
     func : pool3d_grad

+- backward_api : pow_grad
+  forward : pow(Tensor x, Scalar s) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar s=-1)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow_grad
+
 - backward_api : prelu_grad
   forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
   args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
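For `pow_grad`, the forward scalar `s` is replayed into the backward signature (`Scalar s=-1` is just a default in the generated binding). The expected math is the standard power rule, dx = s * x**(s-1) * dout, shown as a NumPy reference sketch (not code from this diff):

```python
import numpy as np

def pow_grad_ref(x, out_grad, s):
    # d/dx x**s = s * x**(s - 1), chained with the upstream gradient.
    return s * np.power(x, s - 1) * out_grad
```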
......