Unverified · commit cf209204 authored by Charles-hit, committed by GitHub

add prim test for sqrt and exp (#50942)

Parent: 6786c012
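This commit adds composite-operator ("prim") coverage for `sqrt` and `exp` to the activation-op tests, registers `test_activation_op` with the CINN op-test list, and teaches the prim checkers to skip CUDA-targeted runs on CPU-only builds. As a minimal sketch of the test pattern the new classes follow (modeled on the diff below; `OpTest` and its attributes come from Paddle's `op_test` test utilities):

```python
import numpy as np
import paddle
from op_test import OpTest  # Paddle's unit-test base class for operators


class SqrtPrimSketch(OpTest):
    """Sketch of the prim-test pattern this commit introduces."""

    def setUp(self):
        self.op_type = "sqrt"          # the C++ operator under test
        self.prim_op_type = "prim"     # also check the composite (prim) lowering
        self.python_api = paddle.sqrt  # eager API used as the reference
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, [12, 17]).astype(np.float32)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': np.sqrt(x)}
        self.enable_cinn = False       # CINN backend checking is off for now

    def test_check_grad(self):
        # check_prim=True additionally validates the gradient produced by
        # the composite (prim) decomposition against the eager gradient.
        self.check_grad(['X'], 'Out', check_prim=True)
```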
@@ -1203,7 +1203,7 @@ if($ENV{USE_STANDALONE_EXECUTOR})
 endif()
 
 set(TEST_CINN_OPS test_softmax_op test_expand_v2_op test_reduce_op
-                  test_slice_op)
+                  test_slice_op test_activation_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
@@ -378,6 +378,11 @@ class PrimForwardChecker:
         )
 
     def check(self):
+        if (
+            self.place is paddle.fluid.libpaddle.CUDAPlace
+            and not paddle.is_compiled_with_cuda()
+        ):
+            return
         self.eager_desire = self.get_eager_desire()
         if self.enable_check_static_comp:
             self.check_static_comp()
@@ -773,6 +778,11 @@ class PrimGradChecker(PrimForwardChecker):
         self.checker_name = "PrimGradChecker"
 
     def check(self):
+        if (
+            self.place is paddle.fluid.libpaddle.CUDAPlace
+            and not paddle.is_compiled_with_cuda()
+        ):
+            return
         self.eager_desire = self.get_eager_desire()
         if self.enable_check_eager_comp:
             self.check_eager_comp()
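Both `check()` additions are the same guard: when the checker targets CUDA but the installed wheel was built without CUDA support, the prim checks return early instead of failing at tensor-placement time. In isolation (a sketch; `should_skip_place` is a hypothetical name, the real code inlines the condition — note the `is` identity test, which suggests these checkers are parameterized with the `CUDAPlace` type itself rather than an instance):

```python
import paddle


def should_skip_place(place) -> bool:
    # Skip when the target place is CUDA but this Paddle build has no CUDA.
    return (
        place is paddle.fluid.libpaddle.CUDAPlace
        and not paddle.is_compiled_with_cuda()
    )
```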
@@ -90,6 +90,72 @@ class TestActivation_ZeroDim(TestActivation):
         self.shape = []
 
 
+class TestExpPrimFp32(OpTest):
+    def setUp(self):
+        self.op_type = "exp"
+        self.prim_op_type = "prim"
+        self.init_dtype()
+        self.init_shape()
+        self.python_api = paddle.exp
+        np.random.seed(2049)
+        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+        out = np.exp(x)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.skip_cinn()
+        self.set_only_prim()
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def init_shape(self):
+        self.shape = [12, 17]
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+    def set_only_prim(self):
+        pass
+
+
+class TestExpPrimFp64(TestExpPrimFp32):
+    def init_dtype(self):
+        self.dtype = np.float64
+
+
+class TestExpPrimFp16(TestExpPrimFp32):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def set_only_prim(self):
+        self.only_prim = True
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+
+class TestExpPrim_ZeroDim(TestExpPrimFp32):
+    def init_shape(self):
+        self.shape = []
+
+    def skip_cinn(self):
+        self.enable_cinn = False
+
+
 class TestExpm1(TestActivation):
     def setUp(self):
         self.op_type = "expm1"
@@ -167,6 +233,8 @@ class TestExpm1API(unittest.TestCase):
 class TestParameter:
     def test_out_name(self):
         with fluid.program_guard(fluid.Program()):
+            if paddle.fluid.framework.in_dygraph_mode():
+                paddle.enable_static()
             np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
             data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
             out = eval("paddle.%s(data, name='Y')" % self.op_type)
@@ -1062,6 +1130,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
 class TestSqrt(TestActivation, TestParameter):
     def setUp(self):
         self.op_type = "sqrt"
+        self.prim_op_type = "prim"
         self.python_api = paddle.sqrt
         self.init_dtype()
         self.init_shape()
@@ -1072,7 +1141,9 @@ class TestSqrt(TestActivation, TestParameter):
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
+        self.enable_cinn = False
 
+    # TODO(wanghao107) add prim test
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
@@ -1082,17 +1153,58 @@ class TestSqrt(TestActivation, TestParameter):
         self.check_output(check_eager=True)
 
 
+class TestSqrtPrimFp32(TestActivation):
+    def setUp(self):
+        self.op_type = "sqrt"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.sqrt
+        self.init_dtype()
+        self.init_shape()
+        np.random.seed(1023)
+        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
+        out = np.sqrt(x)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.enable_cinn = False
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
+
+    def test_check_output(self):
+        self.check_output(check_eager=True)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestSqrt_ZeroDim(TestSqrt):
     def init_shape(self):
         self.shape = []
 
 
+class TestSqrtPrim_ZeroDim(TestSqrt):
+    def init_shape(self):
+        self.shape = []
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
 )
 class TestSqrtBF16(OpTest):
     def setUp(self):
         self.op_type = "sqrt"
+        self.prim_op_type = "prim"
         self.python_api = paddle.sqrt
         self.init_dtype()
         self.init_shape()
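The `*_ZeroDim` variants set `self.shape = []`, which makes NumPy produce a rank-0 (scalar) array, so the op and its prim gradient are also exercised on 0-d tensors:

```python
import numpy as np

# shape = [] yields a 0-d array; the op must handle rank-0 inputs/outputs.
x = np.random.uniform(0.1, 1, []).astype(np.float32)
assert x.ndim == 0
assert np.sqrt(x).ndim == 0
```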
@@ -1105,6 +1217,8 @@ class TestSqrtBF16(OpTest):
             'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
         }
         self.outputs = {'Out': convert_float_to_uint16(out)}
+        # TODO(wanghao107): add prim test
+        self.enable_cinn = False
 
     def init_dtype(self):
         self.dtype = np.uint16
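`TestSqrtBF16` keeps its data as `np.uint16` because bfloat16 is the upper 16 bits of an IEEE float32 bit pattern; `convert_float_to_uint16` from the test utilities performs that conversion. A truncation-based sketch of the idea (`float_to_bf16_bits` is a hypothetical name; the real helper may round to nearest rather than truncate):

```python
import numpy as np


def float_to_bf16_bits(a: np.ndarray) -> np.ndarray:
    # Keep the top 16 bits of each float32: sign, exponent, 7 mantissa bits.
    bits = np.ascontiguousarray(a, dtype=np.float32).view(np.uint32)
    return (bits >> 16).astype(np.uint16)


x = np.array([1.0, 2.0, 4.0], dtype=np.float32)
print(float_to_bf16_bits(x))  # [16256 16384 16512] == 0x3F80, 0x4000, 0x4080
```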