Unverified commit e1674e8b authored by Charles-hit, committed by GitHub

add prim test for some ops (#51749)

* add tanh and cast prim test

* fix tanh test

* fix 0-d test

* add sqrt fp16 prim test

* add public_python_api in prim test

* fix test_squeeze2_op

* add tanh prim test

* add dropout prim test

* [Dy2St]Fix clone for test state problem

* clean code

* modify test_cumsum_op

* modify test_cumsum_op

* fix dropout test

* add dropout in cmake

* fix dropout test

---------
Co-authored-by: Aurelius84 <zhangliujie@baidu.com>
Parent: 20befdef
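The changes below all follow one pattern: each OpTest subclass sets prim_op_type and public_python_api in setUp, and passes check_prim=True to check_output / check_grad so the decomposed (prim) kernels are verified against the regular ones. A condensed sketch of that pattern is given here for orientation only; it is not part of the commit, the class name TestFooPrim is a placeholder, and the body simply mirrors the tanh test further down.

import numpy as np
import paddle
from eager_op_test import OpTest


class TestFooPrim(OpTest):  # hypothetical name; mirrors TestTanh below
    def setUp(self):
        self.op_type = "tanh"                 # op under test
        self.prim_op_type = "prim"            # "prim" here; "comp" for composite ops such as dropout
        self.python_api = paddle.tanh         # dygraph API used by OpTest
        self.public_python_api = paddle.tanh  # public API used by the prim checker
        x = np.random.uniform(0.1, 1, [32, 64]).astype("float32")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.tanh(x)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # check_prim=True additionally verifies the decomposed (prim) gradient.
        self.check_grad(['X'], 'Out', check_prim=True)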
......@@ -1212,7 +1212,9 @@ set(TEST_CINN_OPS
test_mean_op
test_unsqueeze2_op
test_meshgrid_op
test_gather_op)
test_gather_op
test_cast_op
test_dropout_op)
foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
if(WITH_CINN)
......
......@@ -469,9 +469,12 @@ class TestLogSigmoidAPI(unittest.TestCase):
class TestTanh(TestActivation, TestParameter):
def setUp(self):
self.op_type = "tanh"
self.prim_op_type = "prim"
self.python_api = paddle.tanh
self.public_python_api = paddle.tanh
self.init_dtype()
self.init_shape()
self.if_enable_cinn()
np.random.seed(1024)
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
......@@ -483,7 +486,7 @@ class TestTanh(TestActivation, TestParameter):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_prim=True)
def init_dtype(self):
# TODO If dtype is float64, the output (Out) has diff at CPUPlace
......@@ -491,11 +494,17 @@ class TestTanh(TestActivation, TestParameter):
# for now.
self.dtype = np.float32
def if_enable_cinn(self):
pass
class TestTanh_ZeroDim(TestTanh):
def init_shape(self):
self.shape = []
def if_enable_cinn(self):
self.enable_cinn = False
class TestTanhAPI(unittest.TestCase):
# test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
......@@ -601,7 +610,7 @@ class TestAtan(TestActivation, TestParameter):
self.assertEqual(z, z_expected)
class TestAtan_ZeroDim(TestTanh):
class TestAtan_ZeroDim(TestAtan):
def init_shape(self):
self.shape = []
......@@ -3910,7 +3919,7 @@ create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestSqrt, check_prim=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True)
create_test_act_fp16_class(TestAbs, check_prim=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
......
......@@ -28,33 +28,8 @@ from paddle import fluid
from paddle.fluid import Program, core, program_guard
def convert_to_dtype_(dtype):
if dtype == 5:
return core.VarDesc.VarType.FP32
elif dtype == 6:
return core.VarDesc.VarType.FP64
elif dtype == 4:
return core.VarDesc.VarType.FP16
elif dtype == 2:
return core.VarDesc.VarType.INT32
elif dtype == 1:
return core.VarDesc.VarType.INT16
elif dtype == 3:
return core.VarDesc.VarType.INT64
elif dtype == 0:
return core.VarDesc.VarType.BOOL
elif dtype == 22:
return core.VarDesc.VarType.BF16
elif dtype == 20:
return core.VarDesc.VarType.UINT8
elif dtype == 21:
return core.VarDesc.VarType.INT8
elif dtype == np.complex64:
raise ValueError("Not supported dtype %s" % dtype)
def cast_wrapper(x, out_dtype=None):
return paddle.tensor.cast(x, convert_to_dtype_(out_dtype))
return paddle.cast(x, paddle.dtype(out_dtype))
class TestCastOpFp32ToFp64(OpTest):
......@@ -67,13 +42,15 @@ class TestCastOpFp32ToFp64(OpTest):
'out_dtype': int(core.VarDesc.VarType.FP64),
}
self.op_type = 'cast'
self.prim_op_type = "prim"
self.python_api = cast_wrapper
self.public_python_api = cast_wrapper
def test_check_output(self):
self.check_output()
def test_grad(self):
self.check_grad(['X'], ['Out'])
self.check_grad(['X'], ['Out'], check_prim=True)
class TestCastOpFp16ToFp32(OpTest):
......@@ -86,12 +63,16 @@ class TestCastOpFp16ToFp32(OpTest):
'out_dtype': int(core.VarDesc.VarType.FP32),
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.prim_op_type = "prim"
self.python_api = cast_wrapper
self.public_python_api = cast_wrapper
def test_check_output(self):
self.check_output(atol=1e-3)
def test_grad(self):
self.check_grad(['X'], ['Out'], check_prim=True, only_check_prim=True)
class TestCastOpFp32ToFp16(OpTest):
def setUp(self):
......@@ -103,12 +84,16 @@ class TestCastOpFp32ToFp16(OpTest):
'out_dtype': int(core.VarDesc.VarType.FP16),
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.prim_op_type = "prim"
self.python_api = cast_wrapper
self.public_python_api = cast_wrapper
def test_check_output(self):
self.check_output(atol=1e-3)
def test_grad(self):
self.check_grad(['X'], ['Out'], check_prim=True, only_check_prim=True)
class TestCastOpBf16ToFp32(OpTest):
def setUp(self):
......@@ -120,12 +105,17 @@ class TestCastOpBf16ToFp32(OpTest):
'out_dtype': int(core.VarDesc.VarType.FP32),
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.prim_op_type = "prim"
self.python_api = cast_wrapper
self.public_python_api = cast_wrapper
self.enable_cinn = False
def test_check_output(self):
self.check_output()
def test_grad(self):
self.check_grad(['X'], ['Out'], check_prim=True, only_check_prim=True)
class TestCastOpFp32ToBf16(OpTest):
def setUp(self):
......@@ -137,12 +127,17 @@ class TestCastOpFp32ToBf16(OpTest):
'out_dtype': int(core.VarDesc.VarType.BF16),
}
self.op_type = 'cast'
self.__class__.no_need_check_grad = True
self.prim_op_type = "prim"
self.python_api = cast_wrapper
self.public_python_api = cast_wrapper
self.enable_cinn = False
def test_check_output(self):
self.check_output()
def test_grad(self):
self.check_grad(['X'], ['Out'], check_prim=True, only_check_prim=True)
class TestCastOpError(unittest.TestCase):
def test_errors(self):
......
......@@ -15,11 +15,13 @@
import unittest
import numpy as np
import parameterized as param
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle
from paddle import _C_ops, fluid, static
from paddle.fluid import Program, core, program_guard
from paddle.incubate.autograd import primapi
def dropout_wapper(
......@@ -42,46 +44,78 @@ def dropout_wapper(
)
def prim_dropout_wrapper(
x,
Seed=None,
dropout_prob=0.5,
is_test=False,
dropout_implementation='upscale_in_train',
seed=None,
fix_seed=None,
):
return paddle.nn.functional.dropout(
x,
p=dropout_prob,
axis=None,
training=not is_test,
mode=dropout_implementation,
)
class TestDropoutOp(OpTest):
def setUp(self):
self.op_type = "dropout"
self.prim_op_type = "comp"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = {
'Out': self.inputs['X'],
'Mask': np.ones((32, 64)).astype('uint8'),
}
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_prim=True)
class TestDropoutOpInput1d(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((2000,)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = {
'Out': self.inputs['X'],
'Mask': np.ones((2000)).astype('uint8'),
}
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_prim=True)
class TestDropoutOp2(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
self.outputs = {
......@@ -94,12 +128,18 @@ class TestDropoutOp3(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
self.outputs = {
'Out': self.inputs['X'],
'Mask': np.ones((32, 64, 2)).astype('uint8'),
}
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
......@@ -107,6 +147,8 @@ class TestDropoutOp4(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {'dropout_prob': 0.35, 'fix_seed': True, 'is_test': True}
self.outputs = {
......@@ -114,7 +156,7 @@ class TestDropoutOp4(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
......@@ -122,6 +164,8 @@ class TestDropoutOp5(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
self.attrs = {'dropout_prob': 0.75, 'is_test': True}
self.outputs = {
......@@ -129,13 +173,15 @@ class TestDropoutOp5(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
class TestDropoutOp6(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {
'dropout_prob': 1.0,
......@@ -153,6 +199,8 @@ class TestDropoutOp7(TestDropoutOp):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64, 2)).astype("float32")}
self.attrs = {
'dropout_prob': 0.0,
......@@ -164,6 +212,10 @@ class TestDropoutOp7(TestDropoutOp):
'Out': self.inputs['X'],
'Mask': np.ones((32, 64, 2)).astype('uint8'),
}
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
......@@ -171,6 +223,8 @@ class TestDropoutOp8(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
self.attrs = {
'dropout_prob': 0.35,
......@@ -181,7 +235,7 @@ class TestDropoutOp8(OpTest):
self.outputs = {'Out': self.inputs['X']}
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
......@@ -189,6 +243,8 @@ class TestDropoutOp9(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")}
self.attrs = {
'dropout_prob': 0.75,
......@@ -198,13 +254,15 @@ class TestDropoutOp9(OpTest):
self.outputs = {'Out': self.inputs['X']}
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
class TestDropoutOpWithSeed(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.inputs = {
"X": np.random.random((32, 64)).astype("float32"),
"Seed": np.asarray([125], dtype="int32"),
......@@ -216,12 +274,16 @@ class TestDropoutOpWithSeed(OpTest):
'Out': self.inputs['X'],
'Mask': np.ones((32, 64)).astype('uint8'),
}
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
def test_check_output(self):
self.check_output()
self.check_output(check_prim=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', max_relative_error=0.05)
self.check_grad(['X'], 'Out', max_relative_error=0.05, check_prim=True)
@unittest.skipIf(
......@@ -233,6 +295,8 @@ class TestFP16DropoutOp(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.init_test_case()
x = np.random.random(self.input_size).astype("float16")
......@@ -244,6 +308,11 @@ class TestFP16DropoutOp(OpTest):
'is_test': True,
}
self.outputs = {'Out': out}
self.enable_cinn = False
# The prim op compares results with dygraph: when p = 0 the dropout API
# returns x, so x_grad = out_grad in dygraph mode but x_grad = [] in
# static mode.
self.enable_check_static_comp = False
def init_test_case(self):
self.input_size = [32, 64]
......@@ -251,7 +320,9 @@ class TestFP16DropoutOp(OpTest):
self.fix_seed = True
def test_check_output(self):
self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
self.check_output_with_place(
core.CUDAPlace(0), atol=1e-3, check_prim=True
)
@unittest.skipIf(
......@@ -270,7 +341,10 @@ class TestBF16DropoutOp(OpTest):
def setUp(self):
self.op_type = "dropout"
self.python_api = dropout_wapper
self.public_python_api = prim_dropout_wrapper
self.prim_op_type = "comp"
self.dtype = np.uint16
self.enable_cinn = False
x = np.random.random((32, 64)).astype("float32")
self.inputs = {'X': convert_float_to_uint16(x)}
......@@ -288,6 +362,18 @@ class TestBF16DropoutOp(OpTest):
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
def test_check_output_for_prim(self):
# greater_equal doesn't support bfloat16 on CPU
if core.is_compiled_with_cuda():
self.check_output_with_place(core.CUDAPlace(0))
def test_check_grad_for_prim(self):
# greater_equal doesn't support bfloat16 on CPU
if core.is_compiled_with_cuda():
self.check_grad_with_place(
core.CUDAPlace(0), ['X'], 'Out', only_check_prim=True
)
class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase):
def test_seed_cpu_place(self):
......@@ -366,6 +452,7 @@ class TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase):
class TestDropoutOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
paddle.enable_static()
def test_Variable():
# the input of dropout must be Variable.
......@@ -395,6 +482,7 @@ class TestDropoutFAPI(unittest.TestCase):
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = paddle.static.data(
name="input", shape=[-1, -1], dtype="float32"
......@@ -608,6 +696,7 @@ class TestDropoutFAPI(unittest.TestCase):
class TestDropoutFAPIError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
def test_Variable():
......@@ -731,6 +820,7 @@ class TestDropout2DFAPI(unittest.TestCase):
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = paddle.static.data(
name="input", shape=[2, 3, 4, 5], dtype="float32"
......@@ -780,6 +870,7 @@ class TestDropout2DFAPI(unittest.TestCase):
class TestDropout2DFAPIError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
def test_xdim():
......@@ -824,6 +915,7 @@ class TestDropout2DCAPI(unittest.TestCase):
def test_static_fp16_with_gpu(self):
if paddle.fluid.core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
paddle.enable_static()
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
......@@ -853,6 +945,7 @@ class TestDropout3DFAPI(unittest.TestCase):
self.places.append(fluid.CUDAPlace(0))
def check_static_result(self, place):
paddle.enable_static()
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = paddle.static.data(
name="input", shape=[2, 3, 4, 5, 6], dtype="float32"
......@@ -902,6 +995,7 @@ class TestDropout3DFAPI(unittest.TestCase):
class TestDropout3DFAPIError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
def test_xdim():
......@@ -1230,7 +1324,7 @@ class TestDropOutWithProbTensor(unittest.TestCase):
def run_static(self, x):
paddle.seed(2022)
main_program = Program()
paddle.enable_static()
with program_guard(main_program):
input = paddle.static.data(shape=x.shape, name='x', dtype='float32')
out = self.api_case(input)
......@@ -1335,6 +1429,294 @@ class TestRandomValue(unittest.TestCase):
paddle.enable_static()
places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
places.append(paddle.CUDAPlace(0))
class PrimNet(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(
self,
x,
p=0.5,
axis=None,
training=True,
mode="upscale_in_train",
):
out = paddle.nn.functional.dropout(
x=x, p=p, axis=axis, training=training, mode=mode
)
return out
def apply_to_static(net, use_cinn):
build_strategy = paddle.static.BuildStrategy()
build_strategy.build_cinn_pass = use_cinn
return paddle.jit.to_static(net, build_strategy=build_strategy)
@param.parameterized_class(
('name', 'x', 'p', 'is_test', 'mode', 'seed', 'dtype', 'places'),
(
(
'fp32',
np.random.rand(100000),
0.3,
False,
'upscale_in_train',
1002,
'float32',
places,
),
(
'fp64',
np.random.rand(100000),
0.7,
False,
'upscale_in_train',
9999,
'float64',
places,
),
(
'is_test=True',
np.random.rand(100000),
0.5,
True,
'upscale_in_train',
1002,
'float32',
places,
),
(
'p=1.0',
np.random.rand(100000),
1.0,
True,
'upscale_in_train',
1002,
'float32',
places,
),
(
'p=1.0,test=False',
np.random.rand(100000),
1.0,
False,
'upscale_in_train',
1002,
'float32',
places,
),
(
'p=0.0',
np.random.rand(100000),
1.0,
True,
'upscale_in_train',
1002,
'float32',
places,
),
(
'downgrade_train',
np.random.rand(100000),
0.5,
False,
'downscale_in_infer',
1002,
'float32',
places,
),
(
'fp32_cpu',
np.random.rand(100000),
0.6,
False,
'upscale_in_train',
9899,
'float64',
[paddle.CPUPlace()],
),
(
'fp64_cpu',
np.random.rand(100000),
0.6,
False,
'upscale_in_train',
9899,
'float64',
[paddle.CPUPlace()],
),
(
'downgrade_train_cpu',
np.random.rand(100000),
0.5,
False,
'downscale_in_infer',
1002,
'float32',
[paddle.CPUPlace()],
),
),
)
class TestCompositeDropout(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x = cls.x.astype(cls.dtype)
core._set_prim_all_enabled(True)
@classmethod
def tearDownClass(cls):
core._set_prim_all_enabled(False)
def setUp(self):
paddle.seed(self.seed)
self.fwd_desire = []
self.rev_desire = []
for place in self.places:
fwd_desire, rev_desire = self.get_eager_desire(place)
self.fwd_desire.append(fwd_desire.numpy())
self.rev_desire.append(rev_desire.numpy())
def get_eager_desire(self, place):
paddle.disable_static()
paddle.seed(self.seed)
if isinstance(place, fluid.CPUPlace):
paddle.set_device("cpu")
if isinstance(place, fluid.CUDAPlace):
paddle.set_device("gpu")
core.set_prim_eager_enabled(False)
input_ = paddle.to_tensor(
data=self.x, dtype=self.dtype, place=place, stop_gradient=False
)
output = paddle.nn.functional.dropout(
input_, self.p, training=(not self.is_test), mode=self.mode
)
grad = paddle.grad(output, input_)
return output, grad[0]
def test_static_comp(self):
fwd_actual = []
rev_actual = []
mps = []
with paddle.fluid.framework._static_guard():
for place in self.places:
paddle.seed(self.seed)
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
input_ = paddle.static.data(
'x', shape=self.x.shape, dtype=self.x.dtype
)
input_.stop_gradient = False
output = paddle.nn.functional.dropout(
input_,
self.p,
training=(not self.is_test),
mode=self.mode,
)
if core._is_fwd_prim_enabled():
primapi.to_prim(mp.blocks)
grad = paddle.static.gradients(output, input_)[0]
exe = paddle.static.Executor(place)
exe.run(sp)
fwd, rev = exe.run(
mp, feed={input_.name: self.x}, fetch_list=[output, grad]
)
fwd_actual.append(fwd)
rev_actual.append(rev)
mps.append(mp)
for i in range(len(self.places)):
self.assertTrue(
'dropout' not in [op.type for op in mps[i].block(0).ops]
)
np.testing.assert_allclose(
self.fwd_desire[i].sum(),
fwd_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
np.testing.assert_allclose(
self.rev_desire[i].sum(),
rev_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
def test_jit_comp(self):
fwd_actual = []
rev_actual = []
paddle.disable_static()
for place in self.places:
if isinstance(place, fluid.CPUPlace):
paddle.set_device("cpu")
if isinstance(place, fluid.CUDAPlace):
paddle.set_device("gpu")
paddle.seed(self.seed)
input_ = paddle.to_tensor(
data=self.x, dtype=self.dtype, place=place, stop_gradient=False
)
net = PrimNet()
net = apply_to_static(net, False)
output = net(
input_, self.p, training=(not self.is_test), mode=self.mode
)
grad = paddle.grad(output, input_)
fwd_actual.append(output.numpy())
rev_actual.append(grad[0].numpy())
for i in range(len(self.places)):
np.testing.assert_allclose(
self.fwd_desire[i].sum(),
fwd_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
np.testing.assert_allclose(
self.rev_desire[i].sum(),
rev_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
def test_jit_comp_with_cinn(self):
fwd_actual = []
rev_actual = []
paddle.disable_static()
for place in self.places:
if isinstance(place, fluid.CPUPlace):
paddle.set_device("cpu")
if isinstance(place, fluid.CUDAPlace):
paddle.set_device("gpu")
paddle.seed(self.seed)
input_ = paddle.to_tensor(
data=self.x, dtype=self.dtype, place=place, stop_gradient=False
)
net = PrimNet()
net = apply_to_static(net, False)
output = net(
input_, self.p, training=(not self.is_test), mode=self.mode
)
grad = paddle.grad(output, input_)
fwd_actual.append(output.numpy())
rev_actual.append(grad[0].numpy())
for i in range(len(self.places)):
np.testing.assert_allclose(
self.fwd_desire[i].sum(),
fwd_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
np.testing.assert_allclose(
self.rev_desire[i].sum(),
rev_actual[i].sum(),
rtol=1e-2,  # mean of a uniform distribution; scaled to avoid random failures
atol=0,
)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
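For context on what the new TestCompositeDropout cases assert, the core of test_static_comp boils down to the following condensed sketch. It is not part of the commit; it reuses only APIs that already appear in the diff and assumes a CPU place. After primapi.to_prim lowers the program, no raw dropout op remains, while forward and gradient results stay numerically close to eager mode.

import numpy as np
import paddle
from paddle.fluid import core
from paddle.incubate.autograd import primapi

paddle.enable_static()
core._set_prim_all_enabled(True)  # as TestCompositeDropout.setUpClass does

main, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data('x', shape=[100000], dtype='float32')
    x.stop_gradient = False
    out = paddle.nn.functional.dropout(x, p=0.3, training=True, mode='upscale_in_train')
    primapi.to_prim(main.blocks)  # lower dropout into primitive ops
    grad = paddle.static.gradients(out, x)[0]

# The composite lowering removed the raw dropout op from the program.
assert 'dropout' not in [op.type for op in main.block(0).ops]

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
fwd, rev = exe.run(main, feed={'x': np.random.rand(100000).astype('float32')}, fetch_list=[out, grad])

core._set_prim_all_enabled(False)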