Unverified commit 0d28edfa, authored by xiongkun, committed by GitHub

add yaml for ele_max ele_min. (#41161)

* add yaml for ele_max ele_min

* fig

* push

* xxx
Parent 01724b1a
......@@ -40,6 +40,7 @@ from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, c
import paddle
from paddle.utils import deprecated
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
__all__ = [
'fc',
......@@ -204,7 +205,6 @@ def _elementwise_op_in_dygraph(x,
op_name=None):
op = getattr(_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
......
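Note on the two flags imported above: during the eager-dygraph migration, call sites branch on `in_dygraph_mode()` (the new eager mode) versus `_in_legacy_dygraph()` (the old dygraph path). A minimal probe, assuming a Paddle build from this migration era (the flag semantics changed across versions):

```python
# Minimal probe of the two mode flags; semantics assumed from this Paddle era.
import paddle
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

paddle.disable_static()
# In dygraph, exactly one of these is expected to be True: eager or legacy.
print(in_dygraph_mode(), _in_legacy_dygraph())
```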
......@@ -781,10 +781,12 @@ class OpTest(unittest.TestCase):
if arg_name in api_ignore_param_list:
results.append(get_default(idx, api_defaults))
else:
assert idx_of_op_proto_arguments < len(
input_arguments), "Assert False."
tmp = input_arguments[idx_of_op_proto_arguments]
idx_of_op_proto_arguments += 1
if (idx_of_op_proto_arguments < len(input_arguments)):
tmp = input_arguments[idx_of_op_proto_arguments]
idx_of_op_proto_arguments += 1
else:
tmp = Empty() # use the default value
if isinstance(tmp, Empty):
results.append(get_default(idx, api_defaults))
else:
......@@ -1356,6 +1358,9 @@ class OpTest(unittest.TestCase):
self.op_test = op_test # store the op_test object.
self.op_type = op_test.op_type
def init(self):
pass
def convert_uint16_to_float(self, actual_np, expect_np):
raise NotImplementedError("base class, not implemented!")
......@@ -1387,7 +1392,7 @@ class OpTest(unittest.TestCase):
rtol=self.rtol if hasattr(self, 'rtol') else 1e-5,
equal_nan=equal_nan),
"Output (" + name + ") has diff at " + str(place) + " in " +
self.checker_name + " checker")
self.checker_name)
def _compare_list(self, name, actual, expect):
""" if expect is a tuple, we need to compare list.
......@@ -1403,7 +1408,7 @@ class OpTest(unittest.TestCase):
# NOTE(zhiqiu): np.allclose([], [1.]) returns True
# see details: https://stackoverflow.com/questions/38331703/why-does-numpys-broadcasting-sometimes-allow-comparing-arrays-of-different-leng
if expect_np.size == 0:
self.op_test.assertTrue(actual_np.size == 0)
self.op_test.assertTrue(actual_np.size == 0) # }}}
self._compare_numpy(name, actual_np, expect_np)
if isinstance(expect, tuple):
self._compare_list(name, actual, expect)
......@@ -1431,10 +1436,14 @@ class OpTest(unittest.TestCase):
the main enter point of Checker class
"""
self.init()
self.calculate_output()
self.compare_outputs_with_expects()
class StaticChecker(Checker):
def init(self):
self.checker_name = "static checker"
def calculate_output(self):
outs, fetch_list = self.op_test._calc_output(
place, no_check_set=no_check_set)
......@@ -1474,6 +1483,9 @@ class OpTest(unittest.TestCase):
"Output (" + name + ") has different lod at " + str(place))
class DygraphChecker(Checker):
def init(self):
self.checker_name = "dygraph checker"
def calculate_output(self):
self.outputs = self.op_test._calc_dygraph_output(
place, no_check_set=no_check_set)
......@@ -1519,18 +1531,21 @@ class OpTest(unittest.TestCase):
rtol=self.rtol if hasattr(self, 'rtol') else 1e-5,
equal_nan=equal_nan),
"Output (" + name + ") has diff at " + str(place) +
" in " + self.checker_name + " checker")
" in " + self.checker_name)
class EagerChecker(DygraphChecker):
def init(self):
self.checker_name = "eager checker"
def calculate_output(self):
# we only check end2end api when check_eager=True
self.is_python_api_test = True
with _test_eager_guard():
self.is_python_api_test = True
eager_dygraph_outs = self.op_test._calc_python_api_output(
place)
if eager_dygraph_outs is None:
# missing KernelSignature, fall back to eager middle output.
self.is_python_api_test = False
# missing KernelSignature, fall back to eager middle output.
eager_dygraph_outs = self.op_test._calc_dygraph_output(
place, no_check_set=no_check_set)
self.outputs = eager_dygraph_outs
......
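The Checker changes above follow a template-method pattern: `check()` now drives three hooks, and each subclass overrides `init()` only to set its `checker_name` (which the shortened assertion messages reuse, hence dropping the redundant `+ " checker"`). A self-contained sketch of the pattern; hook names mirror the diff, but the bodies are illustrative stand-ins, not Paddle's code:

```python
# Stand-alone sketch of the Checker template-method pattern from the diff.
class Checker:
    def check(self):
        self.init()                          # subclass hook: name, tolerances
        self.calculate_output()              # subclass hook: run one exec path
        self.compare_outputs_with_expects()  # shared comparison logic

    def init(self):
        pass                                 # default no-op, as in the diff

    def compare_outputs_with_expects(self):
        assert self.outputs == self.expects, "diff in " + self.checker_name

class EagerChecker(Checker):
    def init(self):
        self.checker_name = "eager checker"

    def calculate_output(self):
        # Stand-in for _calc_python_api_output() plus the KernelSignature
        # fallback to _calc_dygraph_output() shown in the diff.
        self.outputs = self.expects = [1.0, 2.0]

EagerChecker().check()
```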
......@@ -20,11 +20,13 @@ from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import os
import re
import paddle.fluid.core as core
import paddle
class TestElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -35,10 +37,16 @@ class TestElementwiseOp(OpTest):
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
if hasattr(self, 'attrs'):
self.check_output(check_eager=False)
else:
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_eager=False)
else:
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
self.check_grad(
......@@ -55,6 +63,7 @@ class TestElementwiseOp(OpTest):
class TestElementwiseBF16Op(OpTest):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
self.dtype = np.uint16
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
......@@ -69,10 +78,16 @@ class TestElementwiseBF16Op(OpTest):
self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
def test_check_output(self):
self.check_output()
if hasattr(self, 'attrs'):
self.check_output(check_eager=False)
else:
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_eager=False)
else:
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
......@@ -86,6 +101,7 @@ class TestElementwiseBF16Op(OpTest):
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
y = np.array([0.5]).astype("float64")
self.inputs = {'X': x, 'Y': y}
......@@ -95,6 +111,7 @@ class TestElementwiseMaxOp_scalar(TestElementwiseOp):
class TestElementwiseMaxOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.random((100, )).astype("float64")
sgn = np.random.choice([-1, 1], (100, )).astype("float64")
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64")
......@@ -105,6 +122,7 @@ class TestElementwiseMaxOp_Vector(TestElementwiseOp):
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[:, 0, 0] + sgn * \
......@@ -121,6 +139,7 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[0, :, 0] + sgn * \
......@@ -137,6 +156,7 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[0, 0, :] + sgn * \
......@@ -152,6 +172,7 @@ class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
y = x[0, :, :, 0] + sgn * \
......@@ -168,6 +189,7 @@ class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
self.python_api = paddle.maximum
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
y = x + sgn * \
......
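The comment repeated in these setUp() methods (if x and y share a value, max() is not differentiable there) is why every case builds Y at a guaranteed distance from X. A NumPy sketch of that trick; shapes and bounds here are illustrative, not copied from any one case:

```python
# Tie-avoiding data generation, as used throughout the tests above.
import numpy as np

x = np.random.uniform(0.1, 1.0, (13, 17)).astype("float64")
sgn = np.random.choice([-1.0, 1.0], (13, 17))
y = x + sgn * np.random.uniform(0.1, 1.0, (13, 17))  # |y - x| >= 0.1

assert np.all(np.abs(y - x) >= 0.1 - 1e-12)  # no ties, so max() stays smooth
out = np.maximum(x, y)
```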
......@@ -27,6 +27,7 @@ paddle.enable_static()
class TestElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
......@@ -37,10 +38,16 @@ class TestElementwiseOp(OpTest):
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
if hasattr(self, 'attrs'):
self.check_output(check_eager=False)
else:
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
if hasattr(self, 'attrs'):
self.check_grad(['X', 'Y'], 'Out', check_eager=False)
else:
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
def test_check_grad_ingore_x(self):
self.check_grad(
......@@ -56,6 +63,7 @@ class TestElementwiseOp(OpTest):
class TestElementwiseMinOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float64")
y = np.array([0.5]).astype("float64")
self.inputs = {'X': x, 'Y': y}
......@@ -65,6 +73,7 @@ class TestElementwiseMinOp_scalar(TestElementwiseOp):
class TestElementwiseMinOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.random((100, )).astype("float64")
sgn = np.random.choice([-1, 1], (100, )).astype("float64")
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64")
......@@ -75,6 +84,7 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[:, 0, 0] + sgn * \
......@@ -91,6 +101,7 @@ class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[0, :, 0] + sgn * \
......@@ -107,6 +118,7 @@ class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
y = x[0, 0, :] + sgn * \
......@@ -122,6 +134,7 @@ class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
y = x[0, :, :, 0] + sgn * \
......@@ -138,6 +151,7 @@ class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
self.python_api = paddle.minimum
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float64)
sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float64)
y = x + sgn * \
......
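With `python_api` set, `check_eager=True` runs these cases through the public APIs end to end; the `hasattr(self, 'attrs')` guard skips the eager path for cases that set attributes (such as a broadcast axis) that the `(x, y)`-only final-state signature cannot express. A quick usage sketch of the two APIs, with expected values in comments:

```python
# Usage sketch of the APIs the tests now exercise end to end;
# both broadcast elementwise, like their NumPy counterparts.
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
y = paddle.to_tensor([2.5, 2.5])     # broadcast across rows
print(paddle.maximum(x, y))          # [[2.5, 2.5], [3.0, 4.0]]
print(paddle.minimum(x, y))          # [[1.0, 2.0], [2.5, 2.5]]
```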
......@@ -177,6 +177,12 @@ def pow(x, y, name=None):
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
OP_NAMEMAPPING = {
'elementwise_max': 'final_state_maximum',
'elementwise_min': 'final_state_minimum',
'elementwise_pow': 'final_state_elementwise_pow',
'elementwise_floordiv': 'final_state_floor_divide',
}
@dygraph_only
def _elementwise_op_in_dygraph(x,
......@@ -185,13 +191,20 @@ def _elementwise_op_in_dygraph(x,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
def is_inplace(op_name):
return op_name[-1] == "_"
if in_dygraph_mode():
op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
out = op(x, y)
if _in_legacy_dygraph():
op = getattr(_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def _elementwise_op(helper):
op_type = helper.layer_type
original_op_type = helper.kwargs.get('original_op_type', op_type)
......
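The rewritten `_elementwise_op_in_dygraph` is a two-way dispatch: in new dygraph mode it remaps the legacy op name through `OP_NAMEMAPPING` (unless the op is in-place, i.e. its name ends in `_`) and calls the final-state op with just `(x, y)`; in legacy dygraph it keeps the old attribute-passing call. A self-contained sketch of that dispatch, with a plain dict and lambdas standing in for Paddle's `_C_ops` module:

```python
# Dispatch sketch; Paddle internals replaced with plain functions.
OP_NAMEMAPPING = {'elementwise_max': 'final_state_maximum'}

_c_ops = {
    'final_state_maximum': lambda x, y: max(x, y),       # new (x, y) signature
    'elementwise_max': lambda x, y, *attrs: max(x, y),   # legacy attr signature
}

def is_inplace(op_name):
    return op_name[-1] == "_"   # Paddle's in-place ops end with "_"

def dispatch(op_name, x, y, in_new_dygraph):
    if in_new_dygraph:
        name = op_name if is_inplace(op_name) else OP_NAMEMAPPING[op_name]
        return _c_ops[name](x, y)
    return _c_ops[op_name](x, y, 'axis', -1, 'use_mkldnn', False)

print(dispatch('elementwise_max', 2, 3, in_new_dygraph=True))   # 3
print(dispatch('elementwise_max', 2, 3, in_new_dygraph=False))  # 3
```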
......@@ -744,6 +744,15 @@
func : matrix_power
backward : matrix_power_grad
- api : maximum
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : maximum
backward : maximum_grad
- api : mean
args : (Tensor x, int64_t[] axis={}, bool keep_dim=false)
output : Tensor
......@@ -752,6 +761,15 @@
kernel :
func : mean
- api : minimum
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : ElementwiseInferMeta
kernel :
func : minimum
backward : minimum_grad
- api : modulo
args : (Tensor x, Tensor y)
output : Tensor
......
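Each `api` entry above feeds Paddle's code generator: `args` fixes the eager signature, `ElementwiseInferMeta` derives the output shape and dtype from the two inputs, `kernel` binds the PHI kernel, and `backward` links the grad entry below. The generated functions are what `OP_NAMEMAPPING` targets as `final_state_maximum` / `final_state_minimum`. A defensive sketch of reaching one, mirroring the `getattr` pattern in math.py; the fallback to `paddle.maximum` is a hedge for builds without the generated op, not Paddle code:

```python
# Reaching the generated final-state op via the getattr pattern from math.py.
import paddle
from paddle import _C_ops

x = paddle.to_tensor([1.0, 4.0])
y = paddle.to_tensor([3.0, 2.0])
op = getattr(_C_ops, 'final_state_maximum', None)
out = op(x, y) if op is not None else paddle.maximum(x, y)
print(out)  # [3., 4.]
```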
......@@ -408,6 +408,26 @@
kernel :
func : matrix_power_grad
- backward_api : maximum_grad
forward : maximum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : maximum_grad
- backward_api : minimum_grad
forward : minimum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, y]
kernel :
func : minimum_grad
- backward_api : modulo_grad
forward : add (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
......
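Semantically, `maximum_grad` routes `out_grad` to whichever input won each element, which is also why the tests keep X and Y from tying. A NumPy sketch of that subgradient pair; the tie-breaking side (here, ties credited to y) is an assumption, since the YAML only names the kernel:

```python
# NumPy sketch of the maximum subgradient; tie handling is an assumption.
import numpy as np

def maximum_grad(x, y, out_grad):
    dx = out_grad * (x > y)    # gradient flows to x where x strictly won
    dy = out_grad * (x <= y)   # otherwise to y (ties go to y in this sketch)
    return dx, dy

x = np.array([1.0, 5.0, 3.0])
y = np.array([2.0, 4.0, 3.0])
dx, dy = maximum_grad(x, y, np.ones(3))
print(dx, dy)  # [0. 1. 0.] [1. 0. 1.]
```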