Unverified commit 143eceeb, authored by YuhangLi, committed by GitHub

[AMP OP&Test] Append bf16/fp16 support 4 elementwise_max (#51151)

* wisemax fp16 support

* add bf16 support 4 elementwise_max

* append broadcast 4 op 4 fp16 / bf16

* fix elewise_max ut bf16 numeric delta

* append fp/bf16 uts

* add fp/bf16 uts

* change bf16 uts delta

* fix some issue

* add prim 4 fp16
Parent b4f49aa1
```diff
@@ -22,18 +22,23 @@ import paddle.fluid.core as core
 class TestElementwiseOp(OpTest):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
+    def init_data(self):
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            "float64"
+        )
+
+    def setUp(self):
+        self.init_data()
+        self.op_type = "elementwise_max"
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        self.python_api = paddle.maximum
+        self.inputs = {'X': self.x, 'Y': self.y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
```
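The comment in `init_data` explains the data-generation trick used throughout this file: `max(x, y)` has no well-defined gradient where `x == y`, so `y` is built as `x` plus an offset of magnitude at least 0.1 in a random direction, which rules out ties. A standalone NumPy illustration (variable names are mine, not part of the diff):

```python
import numpy as np

x = np.random.uniform(0.1, 1, [13, 17])
sgn = np.random.choice([-1, 1], [13, 17])          # random offset direction
y = x + sgn * np.random.uniform(0.1, 1, x.shape)   # |x - y| is at least 0.1
assert np.abs(x - y).min() >= 0.1                  # no ties, max() stays differentiable
```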
```diff
@@ -90,40 +95,49 @@ class TestElementwiseOp(OpTest):
 )
 
 
+class TestElementwiseFP16Op(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float16)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float16)
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float16
+        )
+
+
 class TestElementwiseMaxOp_ZeroDim1(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim1(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
 
 class TestElementwiseMaxOp_ZeroDim2(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        y = np.random.uniform(0.1, 1, []).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        self.y = np.random.uniform(0.1, 1, []).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim2(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
+        self.y = np.random.uniform(0.1, 1, []).astype("float16")
 
 
 class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.uniform(0.1, 1, []).astype("float64")
-        y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float64")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_ZeroDim3(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float16")
+        self.y = np.random.uniform(0.1, 1, [13, 17]).astype("float16")
 
 
 @unittest.skipIf(
```
```diff
@@ -135,23 +149,30 @@ class TestElementwiseMaxOp_ZeroDim3(TestElementwiseOp):
     "run test when gpu is availble and the minimum cudnn version is 8.1.0 and gpu's compute capability is at least 8.0.",
 )
 class TestElementwiseBF16Op(OpTest):
+    def init_data(self):
+        # If x and y have the same value, the max() is not differentiable.
+        # So we generate test data by the following method
+        # to avoid them being too close to each other.
+        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
+        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(
+            np.float32
+        )
+
     def setUp(self):
+        self.init_data()
         self.op_type = "elementwise_max"
         self.python_api = paddle.maximum
         self.prim_op_type = "prim"
         self.enable_cinn = False
         self.dtype = np.uint16
-        # If x and y have the same value, the max() is not differentiable.
-        # So we generate test data by the following method
-        # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
-        sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
         self.inputs = {
-            'X': convert_float_to_uint16(x),
-            'Y': convert_float_to_uint16(y),
+            'X': convert_float_to_uint16(self.x),
+            'Y': convert_float_to_uint16(self.y),
+        }
+        self.outputs = {
+            'Out': convert_float_to_uint16(np.maximum(self.x, self.y))
         }
-        self.outputs = {'Out': convert_float_to_uint16(np.maximum(x, y))}
 
     def test_check_output(self):
         if hasattr(self, 'attrs'):
```
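`OpTest` has no native bfloat16 array type, so the bf16 test declares `self.dtype = np.uint16` and feeds tensors as raw bit patterns: data is generated in float32 and narrowed with `convert_float_to_uint16`, a Paddle test helper. A bfloat16 value is the top 16 bits of an IEEE float32 (1 sign bit, 8 exponent bits, 7 mantissa bits), so a minimal sketch of such a conversion (my illustration, not necessarily Paddle's exact implementation) looks like:

```python
import numpy as np

def float32_to_bf16_bits(a):
    # Keep the high half of each float32; dropping the low 16 mantissa
    # bits truncates the value to bfloat16 precision.
    return (np.asarray(a, dtype=np.float32).view(np.uint32) >> 16).astype(np.uint16)

def bf16_bits_to_float32(b):
    # Widen back by zero-filling the dropped mantissa bits.
    return (b.astype(np.uint32) << 16).view(np.float32)
```

Note that the expected output is converted as well, so `test_check_output` compares both sides at bfloat16 precision.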
```diff
@@ -173,32 +194,79 @@ class TestElementwiseBF16Op(OpTest):
         self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
 
 
+class TestElementwiseMaxBF16Op_ZeroDim1(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.uniform(0.1, 1, []).astype("float32")
+        self.y = np.random.uniform(0.1, 1, []).astype("float32")
+
+    def test_check_grad_normal(self):
+        if hasattr(self, 'attrs'):
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=False
+            )
+        else:
+            self.check_grad(
+                ['X', 'Y'], 'Out', numeric_grad_delta=0.05, check_eager=True
+            )
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', numeric_grad_delta=0.05, no_grad_set=set("X")
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', numeric_grad_delta=0.05, no_grad_set=set('Y')
+        )
+
+
+class TestElementwiseMaxBF16Op_scalar(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
+        self.y = np.array([0.5]).astype("float32")
+        self.__class__.no_need_check_grad = True
+
+
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
 )
 class TestElementwiseMaxOp_scalar(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
-        y = np.array([0.5]).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
+        self.y = np.array([0.5]).astype("float64")
+
+
+class TestElementwiseMaxFP16Op_scalar(TestElementwiseMaxOp_scalar):
+    def init_data(self):
+        self.x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float16")
+        self.y = np.array([0.5]).astype("float16")
 
 
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
-    def setUp(self):
-        self.op_type = "elementwise_max"
-        self.python_api = paddle.maximum
-        self.prim_op_type = "prim"
-        self.enable_cinn = False
-        x = np.random.random((100,)).astype("float64")
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float64")
         sgn = np.random.choice([-1, 1], (100,)).astype("float64")
-        y = x + sgn * np.random.uniform(0.1, 1, (100,)).astype("float64")
-        self.inputs = {'X': x, 'Y': y}
-        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float64"
+        )
+
+
+class TestElementwiseMaxFP16Op_Vector(TestElementwiseOp):
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float16")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float16")
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float16"
+        )
+
+
+class TestElementwiseMaxBF16Op_Vector(TestElementwiseBF16Op):
+    def init_data(self):
+        self.x = np.random.random((100,)).astype("float32")
+        sgn = np.random.choice([-1, 1], (100,)).astype("float32")
+        self.y = self.x + sgn * np.random.uniform(0.1, 1, (100,)).astype(
+            "float32"
+        )
 
 
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
```
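The new BF16 subclasses also relax `numeric_grad_delta` to 0.05 (the "change bf16 uts delta" item in the commit message). The reason is resolution: with only 7 explicit mantissa bits, bfloat16 values near 1.0 are spaced about 2^-7 ≈ 0.008 apart, so the much smaller default finite-difference step can be swallowed by rounding and the numeric gradient becomes meaningless; a larger step keeps the perturbation representable. A quick self-contained check of that resolution (my illustration):

```python
import numpy as np

x = np.random.uniform(0.1, 1, 10000).astype(np.float32)
# Truncate to bfloat16 precision and widen back (see the sketch above).
x_bf16 = ((x.view(np.uint32) >> 16) << 16).view(np.float32)
rel_err = np.abs(x - x_bf16) / x
print(rel_err.max())  # just under 2**-7, i.e. roughly 0.8% relative error
```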
```diff
@@ -221,6 +289,26 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
         }
 
 
+class TestElementwiseMaxFP16Op_broadcast_0(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[:, 0, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+        self.attrs = {'axis': 0}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
```
```diff
@@ -241,6 +329,26 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
         }
 
 
+class TestElementwiseMaxFP16Op_broadcast_1(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, :, 0] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
```
```diff
@@ -260,6 +368,25 @@ class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
         }
 
 
+class TestElementwiseMaxFP16Op_broadcast_2(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (100,)).astype(np.float16)
+        y = x[0, 0, :] + sgn * np.random.uniform(1, 2, (100,)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
```
```diff
@@ -280,6 +407,26 @@ class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
         }
 
 
+class TestElementwiseMaxFP16Op_broadcast_3(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float16)
+        y = x[0, :, :, 0] + sgn * np.random.uniform(1, 2, (50, 2)).astype(
+            np.float16
+        )
+        self.inputs = {'X': x, 'Y': y}
+        self.attrs = {'axis': 1}
+        self.outputs = {
+            'Out': np.maximum(
+                self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)
+            )
+        }
+
+
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
```
```diff
@@ -293,5 +440,17 @@ class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
 
 
+class TestElementwiseFP16Op_broadcast_4(TestElementwiseOp):
+    def setUp(self):
+        self.op_type = "elementwise_max"
+        self.python_api = paddle.maximum
+        self.prim_op_type = "prim"
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float16)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float16)
+        y = x + sgn * np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float16)
+        self.inputs = {'X': x, 'Y': y}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+
+
 if __name__ == '__main__':
     unittest.main()
```
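With these kernels and tests in place, `paddle.maximum` accepts float16 inputs directly (and bfloat16 on GPUs that satisfy the `skipIf` guard above). A minimal usage sketch, assuming a CUDA build of Paddle with fp16 kernels available:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 0.5]], dtype='float16')
y = paddle.to_tensor([[2.0, 1.0], [0.0, 4.0]], dtype='float16')
print(paddle.maximum(x, y))  # elementwise max, result stays float16
```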