Unverified commit 230f3dac, authored by ronnywang, committed by GitHub

[XPU] use np.testing.assert_allclose instead of assertTrue(np.allclose(...)), test=kunlun (#44799)

Parent: ede0990f
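
Why this change: a bare `self.assertTrue(np.allclose(a, b))` only reports "False is not true" when it fails, whereas `np.testing.assert_allclose(a, b)` raises an AssertionError that prints the mismatched elements and both arrays, which makes CI failures on Kunlun/XPU far easier to diagnose. A minimal sketch of the difference (the array values here are illustrative only, not taken from these tests):

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.0, 2.0, 3.1])

    # Old pattern: on failure, unittest reports only "False is not true",
    # with no hint of which element differs.
    print(np.allclose(a, b))  # False

    # New pattern: the AssertionError carries a detailed mismatch report
    # (mismatched element count, max absolute/relative difference, both arrays).
    try:
        np.testing.assert_allclose(a, b)
    except AssertionError as e:
        print(e)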
@@ -70,8 +70,8 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
         res = exe.run(main_program,
                       feed={'x': feed_x},
                       fetch_list=[sums.name, x.grad_name])
-        self.assertTrue(np.allclose(res[0], feed_add))
-        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        np.testing.assert_allclose(res[0], feed_add)
+        np.testing.assert_allclose(res[1], ones / 1000.0)


 class TestAssignOpError(unittest.TestCase):
......
@@ -15,7 +15,7 @@
 from __future__ import print_function

 import unittest
-import numpy
+import numpy as np
 import sys

 sys.path.append("..")
@@ -53,7 +53,7 @@ class XPUTestAssignValueOp(XPUOpTestWrapper):
         self.outputs = {"Out": self.value}

     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(self.dtype)
+        self.value = np.random.random(size=(2, 5)).astype(self.dtype)
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

     def test_forward(self):
@@ -62,20 +62,20 @@ class XPUTestAssignValueOp(XPUOpTestWrapper):
     class TestAssignValueOp2(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
+            self.value = np.random.random(size=(2, 5)).astype(np.int32)
             self.attrs["int32_values"] = [int(v) for v in self.value.flat]

     class TestAssignValueOp3(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
+            self.value = np.random.random(size=(2, 5)).astype(np.int64)
             self.attrs["int64_values"] = [int(v) for v in self.value.flat]

     class TestAssignValueOp4(TestAssignValueOp):

         def init_data(self):
-            self.value = numpy.random.choice(a=[False, True],
-                                             size=(2, 5)).astype(numpy.bool)
+            self.value = np.random.choice(a=[False, True],
+                                          size=(2, 5)).astype(np.bool)
             self.attrs["bool_values"] = [int(v) for v in self.value.flat]
@@ -83,7 +83,7 @@ class TestAssignApi(unittest.TestCase):
     def setUp(self):
         self.init_dtype()
-        self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
+        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
             self.dtype)
         self.place = fluid.XPUPlace(0)
@@ -98,8 +98,10 @@ class TestAssignApi(unittest.TestCase):
         exe = fluid.Executor(self.place)
         [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        self.assertTrue(numpy.array_equal(fetched_x, self.value),
-                        "fetch_x=%s val=%s" % (fetched_x, self.value))
+        np.testing.assert_allclose(fetched_x,
+                                   self.value,
+                                   err_msg="fetch_x=%s val=%s" %
+                                   (fetched_x, self.value))
         self.assertEqual(fetched_x.dtype, self.value.dtype)
@@ -119,8 +121,8 @@ class TestAssignApi4(TestAssignApi):
     def setUp(self):
         self.init_dtype()
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.place = fluid.XPUPlace(0)

     def init_dtype(self):
......
@@ -512,7 +512,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
         expect_res = bilinear_interp_np(
             x_data, out_h=12, out_w=12, align_corners=True)
         for res in results:
-            self.assertTrue(np.allclose(res, expect_res))
+            np.testing.assert_allclose(res, expect_res)
 '''

 if __name__ == "__main__":
......
@@ -186,14 +186,14 @@ class TestClipAPI(unittest.TestCase):
             },
             fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8])

-        self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(res3, data.clip(min=0.3)))
-        self.assertTrue(np.allclose(res4, data.clip(max=0.7)))
-        self.assertTrue(np.allclose(res5, data.clip(min=0.2)))
-        self.assertTrue(np.allclose(res6, data.clip(max=0.8)))
-        self.assertTrue(np.allclose(res7, data.clip(max=-1)))
-        self.assertTrue(np.allclose(res8, data))
+        np.testing.assert_allclose(res1, data.clip(0.2, 0.8))
+        np.testing.assert_allclose(res2, data.clip(0.2, 0.9))
+        np.testing.assert_allclose(res3, data.clip(min=0.3))
+        np.testing.assert_allclose(res4, data.clip(max=0.7))
+        np.testing.assert_allclose(res5, data.clip(min=0.2))
+        np.testing.assert_allclose(res6, data.clip(max=0.8))
+        np.testing.assert_allclose(res7, data.clip(max=-1))
+        np.testing.assert_allclose(res8, data)
         paddle.disable_static()

     def test_clip_dygraph(self):
@@ -213,9 +213,9 @@ class TestClipAPI(unittest.TestCase):
         images = paddle.to_tensor(data, dtype='float32')
         out_3 = self._executed_api(images, min=v_min, max=v_max)

-        self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
-        self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
-        self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))
+        np.testing.assert_allclose(out_1.numpy(), data.clip(0.2, 0.8))
+        np.testing.assert_allclose(out_2.numpy(), data.clip(0.2, 0.9))
+        np.testing.assert_allclose(out_3.numpy(), data.clip(0.2, 0.8))

     def test_errors(self):
         paddle.enable_static()
......
@@ -164,7 +164,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
             m = paddle.nn.Dropout(p=0.)
             m.eval()
             result = m(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np)

     class TestDropoutBackward(unittest.TestCase):
@@ -188,10 +188,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                 out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
                 out.backward()

-                self.assertTrue(
-                    np.array_equal(
-                        input.gradient(),
-                        self.cal_grad_downscale_in_infer(mask.numpy())))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_downscale_in_infer(mask.numpy()))

         def test_backward_upscale_train(self):
             for place in self.places:
@@ -205,10 +204,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                                              "upscale_in_train")
                 out.backward()

-                self.assertTrue(
-                    np.allclose(
-                        input.gradient(),
-                        self.cal_grad_upscale_train(mask.numpy(), prob)))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_upscale_train(mask.numpy(), prob))

         def test_backward_upscale_train_2(self):
             for place in self.places:
@@ -222,10 +220,9 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
                                              "upscale_in_train")
                 out.backward()

-                self.assertTrue(
-                    np.allclose(
-                        input.gradient(),
-                        self.cal_grad_upscale_train(mask.numpy(), prob)))
+                np.testing.assert_allclose(
+                    input.gradient(),
+                    self.cal_grad_upscale_train(mask.numpy(), prob))


 support_types = get_xpu_op_support_types('dropout')
......
@@ -86,7 +86,7 @@ class TestDistModelRun(unittest.TestCase):
         print("load inference model api rst:", load_inference_model_rst)

         # step 5: compare two results
-        self.assertTrue(np.allclose(dist_model_rst, load_inference_model_rst))
+        np.testing.assert_allclose(dist_model_rst, load_inference_model_rst)


 if __name__ == '__main__':
......
@@ -75,8 +75,12 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper):
         hist2, _ = np.histogram(data, range=(-3, 5))
         hist2 = hist2.astype("float32")
         hist2 /= float(outs[0].size)
-        self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                        "hist: " + str(hist) + " hist2: " + str(hist2))
+        np.testing.assert_allclose(hist,
+                                   hist2,
+                                   rtol=0,
+                                   atol=0.01,
+                                   err_msg="hist: " + str(hist) +
+                                   " hist2: " + str(hist2))


 class TestMeanStdAreInt(TestGaussianRandomOp):
......
@@ -214,9 +214,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                 expected_result = np.matmul(data1.reshape(1, 2),
                                             data2.reshape(2, 1))

-                self.assertTrue(
-                    np.allclose(np_res, expected_result, atol=1e-3),
-                    "two value is\
-                    {}\n{}, check diff!".format(np_res, expected_result))
+                np.testing.assert_allclose(np_res,
+                                           expected_result,
+                                           atol=1e-3,
+                                           err_msg="two value is\
+                    {}\n{}, check diff!".format(np_res, expected_result))

         def test_dygraph_without_out(self):
@@ -228,8 +229,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                 data2 = fluid.dygraph.to_variable(input_array2)
                 out = paddle.mm(data1, data2)
                 expected_result = np.matmul(input_array1, input_array2)
-                self.assertTrue(
-                    np.allclose(expected_result, out.numpy(), atol=1e-3))
+                np.testing.assert_allclose(expected_result,
+                                           out.numpy(),
+                                           atol=1e-3)

     class Test_API_Matmul(unittest.TestCase):
@@ -244,8 +246,9 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
                                                   self.in_type)
                 out = paddle.matmul(data1, data2)
                 expected_result = np.matmul(input_array1, input_array2)
-                self.assertTrue(
-                    np.allclose(expected_result, out.numpy(), atol=1e-3))
+                np.testing.assert_allclose(expected_result,
+                                           out.numpy(),
+                                           atol=1e-3)

     class API_TestMmError(unittest.TestCase):
......
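
One subtlety of this swap (a general NumPy fact, not something the commit changes): the two APIs use different default tolerances. `np.allclose` defaults to `rtol=1e-05, atol=1e-08`, while `np.testing.assert_allclose` defaults to `rtol=1e-07, atol=0`, so a mechanical replacement makes the check slightly stricter; that is presumably why the matmul and gaussian_random hunks above pass `rtol`/`atol` explicitly. A small illustration:

    import numpy as np

    x = np.float64(1.0)
    y = np.float64(1.0 + 1e-6)  # relative error of about 1e-6

    print(np.allclose(x, y))  # True: within the looser rtol=1e-05 default
    try:
        np.testing.assert_allclose(x, y)  # stricter rtol=1e-07 default
    except AssertionError:
        print("assert_allclose rejects what np.allclose accepted")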