Unverified commit 090aa45d authored by Jiabin Yang, committed by GitHub

【Prim】Enhance tests (#49814)

* support elementwise base func

* fix compiling error and add test

* remove additional param

* support vjp for div using comp

* remove additional change

* fix dy2st error with magic num

* fix dy magic num

* another magic

* another magic

* add more test

* fix windows problem

* another magic

* fix windows compile

* invoke ci

* add skip rename strategy

* support add vjp

* fix test_tanh

* support add with new axis cal

* fix resnet and some test

* add composite log

* support sub vjp

* enhance_tests

* support more dtype for full
Parent 57521274
@@ -126,6 +126,7 @@ Tensor full<DescTensor>(paddle::experimental::IntArray shape,
   op->SetAttr("shape", shape.GetData());
   PADDLE_ENFORCE_EQ(
       ((dtype == paddle::experimental::DataType::FLOAT32) ||
+       (dtype == paddle::experimental::DataType::FLOAT64) ||
        (dtype == paddle::experimental::DataType::FLOAT16)),
       true,
       phi::errors::InvalidArgument(
......
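The hunk above widens the dtype whitelist in the prim full<DescTensor> builder to accept float64 (the "support more dtype for full" item in the commit message). A hypothetical Python mirror of the check, for illustration only — these names are not Paddle API:

# float64 is the newly accepted entry in the whitelist.
SUPPORTED_FULL_DTYPES = {"float16", "float32", "float64"}

def check_full_dtype(dtype: str) -> None:
    # Stands in for the PADDLE_ENFORCE_EQ / InvalidArgument check above.
    if dtype not in SUPPORTED_FULL_DTYPES:
        raise ValueError(
            f"prim full expects float16/float32/float64, got {dtype}"
        )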
@@ -36,11 +36,21 @@ core.set_prim_enabled(True)
             np.random.rand(3, 1, 4),
             np.float32,
         ),
         (
             np.random.rand(2, 3, 3, 4),
             np.random.rand(3, 1, 1),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestTanhGradComp(unittest.TestCase):
......
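The extra shape pairs such as (2, 3, 3, 4) against (2, 3, 1, 1) exercise the new axis calculation for broadcasting in the composite grad rules ("support add with new axis cal"). A minimal numpy sketch of the bookkeeping these cases probe — not Paddle's actual implementation:

import numpy as np

def reduce_grad_to_shape(grad, shape):
    # Undo broadcasting: sum axes prepended by broadcast, then size-1 axes.
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    for axis, size in enumerate(shape):
        if size == 1:
            grad = grad.sum(axis=axis, keepdims=True)
    return grad

g = np.ones((2, 3, 3, 4))
print(reduce_grad_to_shape(g, (2, 3, 1, 1)).shape)  # (2, 3, 1, 1)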
@@ -41,6 +41,16 @@ core.set_prim_enabled(True)
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestTanhGradComp(unittest.TestCase):
......
@@ -41,6 +41,16 @@ core.set_prim_enabled(True)
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestTanhGradComp(unittest.TestCase):
......
@@ -14,8 +14,6 @@
 import unittest

-import autograd
-import autograd.numpy
 import numpy as np
 import parameterized as param
@@ -26,16 +24,22 @@ core.set_prim_enabled(True)
 @param.parameterized_class(
-    ('primal', 'cotangent', 'dtype'),
+    ('primal', 'dtype'),
     [
-        (np.random.rand(10, 10), np.random.rand(10, 10), np.float32),
+        (
+            np.random.rand(2, 3, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.float32,
+        ),
     ],
 )


 class TestTanhGradComp(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         cls.primal = cls.primal.astype(cls.dtype)
-        cls.cotangent = cls.cotangent.astype(cls.dtype)

     def setUp(self):
         paddle.enable_static()
@@ -44,25 +48,29 @@ class TestTanhGradComp(unittest.TestCase):
         paddle.disable_static()

     def test_tanh_grad_comp(self):
-        def actual(primal, cotangent):
+        def actual(primal):
             paddle.disable_static()
             x = paddle.to_tensor(primal, dtype='float32', stop_gradient=False)
             x.stop_gradient = False
-            v = paddle.to_tensor(
-                cotangent, dtype='float32', stop_gradient=False
-            )
             y = paddle.tanh(x)
             x_cotangent = paddle.grad(
-                y, x, v, create_graph=True, retain_graph=True
+                y, x, create_graph=True, retain_graph=True
             )
             return x_cotangent[0]

-        def desired(primal, cotangent):
-            return autograd.make_vjp(autograd.numpy.tanh)(primal)[0](cotangent)
+        def desired(primal):
+            paddle.disable_static()
+            x = paddle.to_tensor(primal, dtype='float32', stop_gradient=False)
+            x.stop_gradient = False
+            y = paddle.tanh(x)
+            x_cotangent = paddle.grad(
+                y, x, create_graph=True, retain_graph=True
+            )
+            return x_cotangent[0]

         np.testing.assert_allclose(
-            actual=actual(self.primal, self.cotangent),
-            desired=desired(self.primal, self.cotangent),
+            actual=actual(self.primal),
+            desired=desired(self.primal),
             rtol=1e-6,
             atol=0,
         )
......
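Both actual and desired now compute the gradient with paddle.grad in dygraph mode, dropping the autograd dependency; presumably the two runs differ only in whether the composite rule is enabled. As an external sanity check (illustrative, not part of the PR), the analytic gradient tanh'(x) = 1 - tanh(x)^2 can be verified directly:

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_tensor(np.random.rand(2, 3, 4).astype('float32'))
x.stop_gradient = False
y = paddle.tanh(x)
(x_grad,) = paddle.grad(y, x)  # default grad_outputs is all-ones
np.testing.assert_allclose(
    x_grad.numpy(), 1.0 - np.tanh(x.numpy()) ** 2, rtol=1e-6, atol=0
)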
@@ -39,6 +39,16 @@ from paddle.fluid import core
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestDivGradComp(unittest.TestCase):
......
@@ -39,6 +39,16 @@ from paddle.fluid import core
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestDivGradComp(unittest.TestCase):
......
@@ -39,6 +39,16 @@ from paddle.fluid import core
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestDivGradComp(unittest.TestCase):
......
@@ -39,6 +39,16 @@ from paddle.fluid import core
             np.random.rand(2, 3, 1, 4),
             np.float32,
         ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 4),
+            np.float32,
+        ),
+        (
+            np.random.rand(2, 3, 3, 4),
+            np.random.rand(2, 3, 1, 1),
+            np.float32,
+        ),
     ],
 )
 class TestDivGradComp(unittest.TestCase):
......
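These parameterizations feed the composite vjp for divide ("support vjp for div using comp"). In numpy terms the rule being composed is roughly the following sketch — not Paddle's actual definition — with the same broadcast reduction as above:

import numpy as np

def _reduce_to(grad, shape):
    # Sum out broadcast axes so the gradient matches the operand's shape.
    while grad.ndim > len(shape):
        grad = grad.sum(axis=0)
    for ax, n in enumerate(shape):
        if n == 1:
            grad = grad.sum(axis=ax, keepdims=True)
    return grad

def div_vjp(g, x, y):
    # z = x / y  =>  dz/dx = 1 / y,  dz/dy = -x / y**2
    return _reduce_to(g / y, x.shape), _reduce_to(-g * x / y**2, y.shape)

x = np.random.rand(2, 3, 3, 4)
y = np.random.rand(2, 3, 1, 1)
g = np.ones_like(x / y)
dx, dy = div_vjp(g, x, y)
print(dx.shape, dy.shape)  # (2, 3, 3, 4) (2, 3, 1, 1)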
@@ -18,6 +18,7 @@ import unittest
 import numpy as np

 import paddle
+from paddle.fluid import core

 paddle.enable_static()
@@ -125,6 +126,25 @@ class TestResnet50Accuracy(unittest.TestCase):
         print(loss_p)
         np.testing.assert_allclose(loss_c, loss_p, rtol=1e-05, atol=1e-05)
+
+    def test_check_resnet50_accuracy_with_composite(self):
+        place = (
+            paddle.CUDAPlace(0)
+            if paddle.is_compiled_with_cuda()
+            else paddle.CPUPlace()
+        )
+        loop_num = 10
+        feed = self.generate_random_data(loop_num)
+        core.set_prim_enabled(True)
+        loss_c = self.train(place, loop_num, feed, use_cinn=True)
+        core.set_prim_enabled(False)
+        loss_p = self.train(place, loop_num, feed, use_cinn=True)
+        print("Losses of Composite + CINN:")
+        print(loss_c)
+        print("Losses of CINN: ")
+        print(loss_p)
+        np.testing.assert_allclose(loss_c, loss_p, rtol=1e-05, atol=1e-05)
+

 if __name__ == '__main__':
     unittest.main()
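The new ResNet-50 case reuses the existing train helper and simply toggles composite lowering between two otherwise identical CINN runs, requiring the losses to match. The pattern in isolation, with a hypothetical stand-in for the train step:

import numpy as np
import paddle
from paddle.fluid import core

def _toy_train(steps=3):
    # Hypothetical stand-in for TestResnet50Accuracy.train: one loss per step.
    paddle.disable_static()
    x = paddle.ones([4, 4])
    return [float(paddle.tanh(x).mean()) for _ in range(steps)]

core.set_prim_enabled(True)   # composite run
loss_c = _toy_train()
core.set_prim_enabled(False)  # baseline run
loss_p = _toy_train()
np.testing.assert_allclose(loss_c, loss_p, rtol=1e-05, atol=1e-05)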