Unverified commit 3152f3fb, authored by xiongkun, committed by GitHub

[Yaml] add yaml for gather op and elementwise_mod op. (#41348)

* gather op

* add mod

Parent: fd591ecb
@@ -2326,7 +2326,7 @@ class TestPow(TestActivation):
     def setUp(self):
         self.op_type = "pow"
         self.python_api = paddle.pow
-        self.check_eager = False
+        self.check_eager = True
         self.init_dtype()
         np.random.seed(1024)
@@ -2337,6 +2337,9 @@ class TestPow(TestActivation):
         self.attrs = {'factor': 3.0}
         self.outputs = {'Out': out}
 
+    def test_check_output(self):
+        self.check_output(check_eager=self.check_eager)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
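For context: `check_eager=True` makes `OpTest` re-run the operator through the new eager (final-state) dygraph path as well, so `paddle.pow` must agree with the reference there too. A minimal sketch of what the eager output check exercises (assumes a Paddle 2.3-era install; the tolerance is illustrative):

```python
import numpy as np
import paddle  # eager (dygraph) mode is the default here

x = paddle.to_tensor(np.random.uniform(1, 2, [11, 17]).astype('float32'))
out = paddle.pow(x, 3.0)  # scalar exponent, matching attrs = {'factor': 3.0}
np.testing.assert_allclose(out.numpy(), x.numpy() ** 3.0, rtol=1e-5)
```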
@@ -29,6 +29,7 @@ class TestElementwiseModOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_mod"
+        self.python_api = paddle.remainder
         self.axis = -1
         self.init_dtype()
         self.init_input_output()
@@ -43,7 +44,10 @@ class TestElementwiseModOp(OpTest):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
@@ -76,7 +80,10 @@ class TestElementwiseModOpFloat(TestElementwiseModOp):
         self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)
 
     def test_check_output(self):
-        self.check_output()
+        if self.attrs['axis'] == -1:
+            self.check_output(check_eager=True)
+        else:
+            self.check_output(check_eager=False)
 
 class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
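Setting `self.python_api = paddle.remainder` tells `OpTest` which public Python API to invoke for the eager-mode check. A hedged sketch of the correspondence the test relies on, reusing the NumPy reference formula from the float test above (input ranges here are illustrative):

```python
import numpy as np
import paddle

x = np.random.uniform(-10, 10, [10, 10]).astype('float64')
y = np.random.uniform(-1000, 1000, [10, 10]).astype('float64')
# Python-style modulo (result takes the sign of the divisor), as in the test
ref = np.fmod(y + np.fmod(x, y), y)

out = paddle.remainder(paddle.to_tensor(x), paddle.to_tensor(y))
np.testing.assert_allclose(out.numpy(), ref, rtol=1e-5)
```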
@@ -43,10 +43,10 @@ class TestGatherOp(OpTest):
         self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def config(self):
         """
@@ -136,10 +136,10 @@ class TestGatherBF16Op(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
+        self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)
 
     def config(self):
         """
@@ -165,10 +165,10 @@ class TestGatherOp1(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def config(self):
         """
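The gather tests now run with `check_eager=True` throughout, so `paddle.gather` is verified against the new final-state kernel as well. For reference, the basic behavior under test (this mirrors the public API docs, not the test code itself):

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
index = paddle.to_tensor([0, 2], dtype='int64')
out = paddle.gather(x, index)  # select rows 0 and 2 along axis 0
print(out.numpy())             # [[1. 2.], [5. 6.]]
```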
@@ -1391,9 +1391,9 @@ def gather(x, index, axis=None, name=None):
     if axis is None:
         axis = 0
 
-    #if in_dygraph_mode():
-        #return _C_ops.final_state_gather(x, index, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_gather(x, index, axis)
+    if _in_legacy_dygraph():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
         return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
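This change un-comments the eager branch: now that the YAML entry below generates `final_state_gather`, `paddle.gather` can dispatch on all three execution modes. A simplified skeleton of the convention (function names as in Paddle 2.3; the static-graph branch is elided here):

```python
# Simplified dispatch skeleton used across paddle.tensor functions:
#   in_dygraph_mode()    -> new eager mode, call the generated final-state op
#   _in_legacy_dygraph() -> old dygraph, call the legacy C++ op with attrs
#   otherwise            -> static graph, append an op via LayerHelper
def gather_sketch(x, index, axis=0):
    if in_dygraph_mode():
        return _C_ops.final_state_gather(x, index, axis)
    if _in_legacy_dygraph():
        return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
    raise NotImplementedError  # static-graph branch elided in this sketch
```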
@@ -150,41 +150,38 @@ def pow(x, y, name=None):
     """
     # in dynamic graph mode
-    #if in_dygraph_mode():
-        #if isinstance(y, (int, float)):
-            #return _C_ops.final_state_pow(x, y)
-        #elif isinstance(y, (paddle.Tensor, Variable)):
-            #return _elementwise_op_in_dygraph(
-                #x, y, axis=-1, act=None, op_name='elementwise_pow')
-        #else:
-            #raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
-    #if _in_legacy_dygraph():
-    if _non_static_mode():
+    if in_dygraph_mode():
         if isinstance(y, (int, float)):
-            return _C_ops.pow(x, 'factor', y)
+            return _C_ops.final_state_pow(x, y)
         elif isinstance(y, (paddle.Tensor, Variable)):
             return _elementwise_op_in_dygraph(
                 x, y, axis=-1, act=None, op_name='elementwise_pow')
         else:
             raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
-    # in static graph mode
-    else:
+    if _in_legacy_dygraph():
         if isinstance(y, (int, float)):
-            helper = LayerHelper('pow', **locals())
-            inputs = {'X': x}
-            attrs = {'factor': y}
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(
-                type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
-            return out
+            return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
-            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
-            helper = LayerHelper('elementwise_pow', **locals())
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+            return _elementwise_op_in_dygraph(
+                x, y, axis=-1, act=None, op_name='elementwise_pow')
         else:
-            raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
+            raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
+    # in static graph mode
+    if isinstance(y, (int, float)):
+        helper = LayerHelper('pow', **locals())
+        inputs = {'X': x}
+        attrs = {'factor': y}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
+        return out
+    elif isinstance(y, (paddle.Tensor, Variable)):
+        # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
+        helper = LayerHelper('elementwise_pow', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
+    else:
+        raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
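With the eager branch enabled, `paddle.pow` routes a scalar exponent to `final_state_pow` and a tensor exponent to `elementwise_pow`. A quick usage sketch of the two paths (values are illustrative):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
print(paddle.pow(x, 2.0).numpy())  # scalar y -> pow kernel: [1. 4. 9.]

y = paddle.to_tensor([2.0, 3.0, 2.0])
print(paddle.pow(x, y).numpy())    # Tensor y -> elementwise_pow: [1. 8. 9.]
```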
@@ -192,6 +189,7 @@ OP_NAMEMAPPING = {
     'elementwise_min': 'final_state_minimum',
     'elementwise_pow': 'final_state_elementwise_pow',
     'elementwise_floordiv': 'final_state_floor_divide',
+    'elementwise_mod': 'final_state_modulo',
 }
 
 @dygraph_only
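`OP_NAMEMAPPING` translates legacy elementwise op names to their generated final-state counterparts, so `elementwise_mod` now resolves to `final_state_modulo` in eager mode. A hedged sketch of how such a mapping is consumed by the `@dygraph_only` elementwise helper that follows it (simplified, not the verbatim Paddle code):

```python
# Simplified illustration of the name-mapping lookup:
def call_final_state(op_name, x, y):
    final_name = OP_NAMEMAPPING[op_name]      # e.g. 'final_state_modulo'
    return getattr(_C_ops, final_name)(x, y)  # generated eager binding
```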
@@ -632,6 +632,16 @@
     data_type : dtype > x
     backend : place > x
 
+- api : gather
+  args : (Tensor x, Tensor index, Scalar axis=0)
+  output : Tensor(out)
+  infer_meta :
+    func : GatherInferMeta
+  kernel :
+    func : gather
+    data_type: x
+  backward : gather_grad
+
 - api : gather_nd
   args : (Tensor x, Tensor index)
   output : Tensor
@@ -1220,6 +1230,16 @@
     func : pool3d
   backward : pool3d_grad
 
+- api : pow
+  args : (Tensor x, Scalar s)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow
+  backward : pow_grad
+
 - api : prelu
   args : (Tensor x, Tensor alpha, str data_format, str mode)
   output : Tensor(out)
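These `api.yaml` entries drive Paddle's operator code generation: each one declares the final-state API signature, the InferMeta function for shape/dtype inference, and the kernel to bind, and the generated eager op is exposed as `_C_ops.final_state_<api>`. Roughly, for the two new entries (argument order follows the declared `args`):

```python
# Hedged sketch of the eager bindings these entries generate:
out = _C_ops.final_state_gather(x, index, axis)  # from '- api : gather'
out = _C_ops.final_state_pow(x, 3.0)             # from '- api : pow' (Scalar s)
```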
@@ -178,7 +178,7 @@
   output : Tensor(x_grad), Tensor(filter_grad)
   infer_meta :
     func : ConvTransposeGradInferMeta
-  kernel : 
+  kernel :
     func : conv2d_transpose_grad
 
 - backward_api : conv3d_transpose_grad
@@ -389,6 +389,17 @@
   kernel :
     func : frobenius_norm_grad
 
+- backward_api : gather_grad
+  forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
+  args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    data_type: x
+    func : gather_grad
+
 - backward_api : gather_nd_grad
   forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad)
@@ -803,6 +814,16 @@
   kernel :
     func : pool3d_grad
 
+- backward_api : pow_grad
+  forward : pow(Tensor x, Scalar s) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, Scalar s=-1)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pow_grad
+
 - backward_api : prelu_grad
   forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
   args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
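The matching `backward.yaml` entries register `gather_grad` and `pow_grad` so autograd works through the new eager ops; `test_check_grad(check_eager=True)` in the tests above exercises exactly this path. A small end-to-end sketch (values are illustrative):

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], stop_gradient=False)
index = paddle.to_tensor([0, 1], dtype='int64')
out = paddle.gather(x, index)  # forward: gather
out.sum().backward()           # backward: routed to gather_grad
print(x.grad)                  # all ones: gather is a pure selection here
```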