Unverified · Commit 93c5c887 · Authored by: Nyakku Shigure · Committed by: GitHub

[CodeStyle] use np.testing.assert_array_equal instead of self.assertTrue(np.array_equal(...)) (#44947)

* automatically fix

* update comments

* numpy -> np

* self.assertEqual(..., True)

* wrong usage (err_msg=True)

These are not errors introduced by the fix; they are pre-existing misuses of `self.assertTrue(..., True)`,
so after the automated rewrite the stray `True` ended up as the positional argument `err_msg` (see the sketch below)

* some missing fix
Parent f694e991
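
For context, a minimal sketch of the mechanical rewrite this commit applies; the arrays `out` and `expected` are illustrative, not taken from the patch:

```python
import numpy as np

out = np.array([1.0, 2.0])
expected = np.array([1.0, 2.0])

# Old style: on failure unittest only reports "False is not true", so a
# useful message had to be built by hand as assertTrue's second argument.
# self.assertTrue(np.array_equal(out, expected),
#                 "custom op out: {},\n expected: {}".format(out, expected))

# New style: np.testing.assert_array_equal raises AssertionError itself and
# prints both arrays plus the element-wise mismatch; err_msg is optional.
np.testing.assert_array_equal(
    out, expected,
    err_msg='custom op out: {},\n expected: {}'.format(out, expected))

# The pre-existing misuse noted above: the trailing True was presumably meant
# as a second condition, but unittest silently takes it as the msg argument;
# a purely mechanical rewrite would turn it into err_msg=True, which this
# commit cleans up by hand.
# self.assertTrue(np.array_equal(out, expected), True)
```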
@@ -43,9 +43,10 @@ class TestCustomKernelDot(unittest.TestCase):
         y = paddle.to_tensor(y_data)
         out = paddle.dot(x, y)
-        self.assertTrue(
-            np.array_equal(out.numpy(), result),
-            "custom kernel dot out: {},\n numpy dot out: {}".format(
+        np.testing.assert_array_equal(
+            out.numpy(),
+            result,
+            err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
                 out.numpy(), result))
@@ -72,9 +73,10 @@ class TestCustomKernelDotC(unittest.TestCase):
         y = paddle.to_tensor(y_data)
         out = paddle.dot(x, y)
-        self.assertTrue(
-            np.array_equal(out.numpy(), result),
-            "custom kernel dot out: {},\n numpy dot out: {}".format(
+        np.testing.assert_array_equal(
+            out.numpy(),
+            result,
+            err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
                 out.numpy(), result))
...
@@ -65,9 +65,10 @@ class TestCustomKernelLoad(unittest.TestCase):
         y = paddle.to_tensor(y_data)
         out = paddle.dot(x, y)
-        self.assertTrue(
-            np.array_equal(out.numpy(), result),
-            "custom kernel dot out: {},\n numpy dot out: {}".format(
+        np.testing.assert_array_equal(
+            out.numpy(),
+            result,
+            err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
                 out.numpy(), result))

     def tearDown(self):
...
@@ -51,7 +51,7 @@ class TestContextPool(unittest.TestCase):
         x = paddle.ones([2, 2], dtype='float32')
         out = custom_ops.context_pool_test(x)
-        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
+        np.testing.assert_array_equal(x.numpy(), out.numpy())

     def test_using_context_pool(self):
         with _test_eager_guard():
...
@@ -66,7 +66,7 @@ class TestJitCustomAttrs(unittest.TestCase):
         out.stop_gradient = False
         out.backward()
-        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
+        np.testing.assert_array_equal(x.numpy(), out.numpy())

     def test_attr_value(self):
         with _test_eager_guard():
@@ -85,7 +85,7 @@ class TestJitCustomAttrs(unittest.TestCase):
         out.stop_gradient = False
         out.backward()
-        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
+        np.testing.assert_array_equal(x.numpy(), out.numpy())

     def test_const_attr_value(self):
         with _test_eager_guard():
...
@@ -112,9 +112,10 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase):
         self.axises = [0, 1]

     def check_output(self, out, pd_out, name):
-        self.assertTrue(
-            np.array_equal(out, pd_out),
-            "custom op {}: {},\n paddle api {}: {}".format(
+        np.testing.assert_array_equal(
+            out,
+            pd_out,
+            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
                 name, out, name, pd_out))

     def func_dynamic(self):
...
@@ -97,9 +97,10 @@ class TestCustomConjJit(unittest.TestCase):
         self.shape = [2, 20, 2, 3]

     def check_output(self, out, pd_out, name):
-        self.assertTrue(
-            np.array_equal(out, pd_out),
-            "custom op {}: {},\n paddle api {}: {}".format(
+        np.testing.assert_array_equal(
+            out,
+            pd_out,
+            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
                 name, out, name, pd_out))

     def run_dynamic(self, dtype, np_input):
...
@@ -97,9 +97,10 @@ class TestCustomLinearJit(unittest.TestCase):
         self.np_bias = np.ones([4], dtype="float32")

     def check_output(self, out, pd_out, name):
-        self.assertTrue(
-            np.array_equal(out, pd_out),
-            "custom op {}: {},\n paddle api {}: {}".format(
+        np.testing.assert_array_equal(
+            out,
+            pd_out,
+            err_msg='custom op {}: {},\n paddle api {}: {}'.format(
                 name, out, name, pd_out))

     def test_static(self):
...
@@ -83,7 +83,7 @@ class TestCustomRawReluOp(unittest.TestCase):
         y1_value, y2_value = exe.run(paddle.static.default_main_program(),
                                      feed={x.name: x_np},
                                      fetch_list=[y1, y2])
-        self.assertTrue(np.array_equal(y1_value, y2_value))
+        np.testing.assert_array_equal(y1_value, y2_value)
         paddle.disable_static()
...
@@ -121,12 +121,11 @@ class TestDygraphModel(unittest.TestCase):
         if _in_legacy_dygraph():
             custom_relu_dy2stat_train_out = self.train_model(
                 use_custom_op=True, dy2stat=True)  # for to_static
-            self.assertTrue(
-                np.array_equal(origin_relu_train_out,
-                               custom_relu_dy2stat_train_out))
+            np.testing.assert_array_equal(origin_relu_train_out,
+                                          custom_relu_dy2stat_train_out)

-        self.assertTrue(
-            np.array_equal(origin_relu_train_out, custom_relu_train_out))
+        np.testing.assert_array_equal(origin_relu_train_out,
+                                      custom_relu_train_out)

         # for eval
         origin_relu_eval_out = self.eval_model(use_custom_op=False)
@@ -134,12 +133,11 @@ class TestDygraphModel(unittest.TestCase):
         if _in_legacy_dygraph():
             custom_relu_dy2stat_eval_out = self.eval_model(
                 use_custom_op=True, dy2stat=True)  # for to_static
-            self.assertTrue(
-                np.array_equal(origin_relu_eval_out,
-                               custom_relu_dy2stat_eval_out))
+            np.testing.assert_array_equal(origin_relu_eval_out,
+                                          custom_relu_dy2stat_eval_out)

-        self.assertTrue(
-            np.array_equal(origin_relu_eval_out, custom_relu_eval_out))
+        np.testing.assert_array_equal(origin_relu_eval_out,
+                                      custom_relu_eval_out)

     def test_train_eval(self):
         with _test_eager_guard():
@@ -243,11 +241,10 @@ class TestStaticModel(unittest.TestCase):
                                                  use_custom_op=True,
                                                  use_pe=True)

-        self.assertTrue(
-            np.array_equal(original_relu_train_out, custom_relu_train_out))
-        self.assertTrue(
-            np.array_equal(original_relu_train_pe_out,
-                           custom_relu_train_pe_out))
+        np.testing.assert_array_equal(original_relu_train_out,
+                                      custom_relu_train_out)
+        np.testing.assert_array_equal(original_relu_train_pe_out,
+                                      custom_relu_train_pe_out)

         # for eval
         original_relu_eval_out = self.eval_model(device,
@@ -261,11 +258,10 @@ class TestStaticModel(unittest.TestCase):
                                                 use_custom_op=True,
                                                 use_pe=True)

-        self.assertTrue(
-            np.array_equal(original_relu_eval_out, custom_relu_eval_out))
-        self.assertTrue(
-            np.array_equal(original_relu_eval_pe_out,
-                           custom_relu_eval_pe_out))
+        np.testing.assert_array_equal(original_relu_eval_out,
+                                      custom_relu_eval_out)
+        np.testing.assert_array_equal(original_relu_eval_pe_out,
+                                      custom_relu_eval_pe_out)

     def train_model(self, device, use_custom_op=False, use_pe=False):
         # reset random seed
...
@@ -71,10 +71,11 @@ class TestJITLoad(unittest.TestCase):
                 out = custom_relu_static(custom_op, device, dtype, x)
                 pd_out = custom_relu_static(custom_op, device, dtype, x,
                                             False)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
-                        out, pd_out))
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.
+                    format(out, pd_out))

     def func_dynamic(self):
         for device in self.devices:
@@ -87,14 +88,16 @@ class TestJITLoad(unittest.TestCase):
                                                   x)
                 pd_out, pd_x_grad = custom_relu_dynamic(
                     custom_op, device, dtype, x, False)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
-                        out, pd_out))
-                self.assertTrue(
-                    np.array_equal(x_grad, pd_x_grad),
-                    "custom op x grad: {},\n paddle api x grad: {}".format(
-                        x_grad, pd_x_grad))
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.
+                    format(out, pd_out))
+                np.testing.assert_array_equal(
+                    x_grad,
+                    pd_x_grad,
+                    err_msg='custom op x grad: {},\n paddle api x grad: {}'.
+                    format(x_grad, pd_x_grad))

     def test_dynamic(self):
         with _test_eager_guard():
...
@@ -224,10 +224,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 out = custom_relu_static(custom_op, device, dtype, x)
                 pd_out = custom_relu_static(custom_op, device, dtype, x,
                                             False)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
-                        out, pd_out))
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.
+                    format(out, pd_out))

     def test_static_pe(self):
         for device in self.devices:
@@ -239,10 +240,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 out = custom_relu_static_pe(custom_op, device, dtype, x)
                 pd_out = custom_relu_static_pe(custom_op, device, dtype, x,
                                                False)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
-                        out, pd_out))
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.
+                    format(out, pd_out))

     def func_dynamic(self):
         for device in self.devices:
@@ -255,14 +257,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                                                   x)
                 pd_out, pd_x_grad = custom_relu_dynamic(
                     custom_op, device, dtype, x, False)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
-                        out, pd_out))
-                self.assertTrue(
-                    np.array_equal(x_grad, pd_x_grad),
-                    "custom op x grad: {},\n paddle api x grad: {}".format(
-                        x_grad, pd_x_grad))
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.
+                    format(out, pd_out))
+                np.testing.assert_array_equal(
+                    x_grad,
+                    pd_x_grad,
+                    err_msg='custom op x grad: {},\n paddle api x grad: {}'.
+                    format(x_grad, pd_x_grad))

     def test_dynamic(self):
         with _test_eager_guard():
@@ -286,10 +290,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
         predict_infer = exe.run(inference_program,
                                 feed={feed_target_names[0]: np_data},
                                 fetch_list=fetch_targets)
-        self.assertTrue(
-            np.array_equal(predict, predict_infer),
-            "custom op predict: {},\n custom op infer predict: {}".
-            format(predict, predict_infer))
+        np.testing.assert_array_equal(
+            predict,
+            predict_infer,
+            err_msg='custom op predict: {},\n custom op infer predict: {}'
+            .format(predict, predict_infer))
         paddle.disable_static()

     def test_static_save_and_run_inference_predictor(self):
@@ -331,14 +336,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
                 self.custom_ops[0], device, dtype, x)
             pd_out, pd_dx_grad = custom_relu_double_grad_dynamic(
                 self.custom_ops[0], device, dtype, x, False)
-            self.assertTrue(
-                np.array_equal(out, pd_out),
-                "custom op out: {},\n paddle api out: {}".format(
+            np.testing.assert_array_equal(
+                out,
+                pd_out,
+                err_msg='custom op out: {},\n paddle api out: {}'.format(
                     out, pd_out))
-            self.assertTrue(
-                np.array_equal(dx_grad, pd_dx_grad),
-                "custom op dx grad: {},\n paddle api dx grad: {}".format(
-                    dx_grad, pd_dx_grad))
+            np.testing.assert_array_equal(
+                dx_grad,
+                pd_dx_grad,
+                err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.
+                format(dx_grad, pd_dx_grad))

     def test_with_dataloader(self):
         for device in self.devices:
@@ -357,9 +364,10 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
             for batch_id, (image, _) in enumerate(train_loader()):
                 out = self.custom_ops[0](image)
                 pd_out = paddle.nn.functional.relu(image)
-                self.assertTrue(
-                    np.array_equal(out, pd_out),
-                    "custom op out: {},\n paddle api out: {}".format(
+                np.testing.assert_array_equal(
+                    out,
+                    pd_out,
+                    err_msg='custom op out: {},\n paddle api out: {}'.format(
                         out, pd_out))
                 if batch_id == 5:
...
@@ -46,9 +46,11 @@ class TestCustomSimpleSliceJit(unittest.TestCase):
         x = paddle.to_tensor(np_x)
         custom_op_out = custom_ops.custom_simple_slice(x, 2, 3)
         np_out = np_x[2:3]
-        self.assertTrue(
-            np.array_equal(custom_op_out, np_out),
-            "custom op: {},\n numpy: {}".format(np_out, custom_op_out.numpy()))
+        np.testing.assert_array_equal(
+            custom_op_out,
+            np_out,
+            err_msg='custom op: {},\n numpy: {}'.format(np_out,
+                                                        custom_op_out.numpy()))

     def test_slice_output(self):
         with _test_eager_guard():
...
@@ -47,9 +47,10 @@ class TestJitDispatch(unittest.TestCase):
         np_x = x.numpy()
         np_out = out.numpy()
         self.assertTrue(dtype in str(np_out.dtype))
-        self.assertTrue(
-            np.array_equal(np_x, np_out),
-            "custom op x: {},\n custom op out: {}".format(np_x, np_out))
+        np.testing.assert_array_equal(
+            np_x,
+            np_out,
+            err_msg='custom op x: {},\n custom op out: {}'.format(np_x, np_out))

     def run_dispatch_test(self, func, dtype):
         with _test_eager_guard():
...
@@ -70,14 +70,12 @@ class TestMultiOutputDtypes(unittest.TestCase):
         one_int32 = one_int32.numpy()
         # Fake_float64
         self.assertTrue('float64' in str(zero_float64.dtype))
-        self.assertTrue(
-            np.array_equal(zero_float64,
-                           np.zeros([4, 8]).astype('float64')))
+        np.testing.assert_array_equal(zero_float64,
+                                      np.zeros([4, 8]).astype('float64'))
         # ZFake_int32
         self.assertTrue('int32' in str(one_int32.dtype))
-        self.assertTrue(
-            np.array_equal(one_int32,
-                           np.ones([4, 8]).astype('int32')))
+        np.testing.assert_array_equal(one_int32,
+                                      np.ones([4, 8]).astype('int32'))

     def test_static(self):
         paddle.enable_static()
...
@@ -144,21 +144,21 @@ class TestCustomCPUPlugin(unittest.TestCase):
                                         place=paddle.CPUPlace())
         custom_cpu_tensor = cpu_tensor._copy_to(
             paddle.CustomPlace('custom_cpu', 0), True)
-        self.assertTrue(np.array_equal(custom_cpu_tensor, x))
+        np.testing.assert_array_equal(custom_cpu_tensor, x)
         self.assertTrue(custom_cpu_tensor.place.is_custom_place())
         # custom -> custom
         another_custom_cpu_tensor = custom_cpu_tensor._copy_to(
             paddle.CustomPlace('custom_cpu', 0), True)
-        self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
+        np.testing.assert_array_equal(another_custom_cpu_tensor, x)
         self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())
         # custom -> cpu
         another_cpu_tensor = custom_cpu_tensor._copy_to(paddle.CPUPlace(), True)
-        self.assertTrue(np.array_equal(another_cpu_tensor, x))
+        np.testing.assert_array_equal(another_cpu_tensor, x)
         self.assertTrue(another_cpu_tensor.place.is_cpu_place())
         # custom -> custom self
         another_custom_cpu_tensor = another_custom_cpu_tensor._copy_to(
             paddle.CustomPlace('custom_cpu', 0), True)
-        self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
+        np.testing.assert_array_equal(another_custom_cpu_tensor, x)
         self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())

     def _test_fallback_kernel(self):
@@ -168,7 +168,7 @@ class TestCustomCPUPlugin(unittest.TestCase):
         x = paddle.to_tensor([5, 4, 3], 'int16')
         y = paddle.to_tensor([1, 2, 3], 'int16')
         z = paddle.add(x, y)
-        self.assertTrue(np.array_equal(z, r))
+        np.testing.assert_array_equal(z, r)

     def tearDown(self):
         del os.environ['CUSTOM_DEVICE_ROOT']
...
@@ -617,9 +617,9 @@ class TestGenerateProposals(LayerTest):
             roi_probs_dy = roi_probs.numpy()
             rois_num_dy = rois_num.numpy()

-        self.assertTrue(np.array_equal(np.array(rois_stat), rois_dy))
-        self.assertTrue(np.array_equal(np.array(roi_probs_stat), roi_probs_dy))
-        self.assertTrue(np.array_equal(np.array(rois_num_stat), rois_num_dy))
+        np.testing.assert_array_equal(np.array(rois_stat), rois_dy)
+        np.testing.assert_array_equal(np.array(roi_probs_stat), roi_probs_dy)
+        np.testing.assert_array_equal(np.array(rois_num_stat), rois_num_dy)


 class TestYoloDetection(unittest.TestCase):
@@ -837,8 +837,8 @@ class TestCollectFpnPropsals(LayerTest):
             fpn_rois_dy = fpn_rois_dy.numpy()
             rois_num_dy = rois_num_dy.numpy()

-        self.assertTrue(np.array_equal(fpn_rois_stat, fpn_rois_dy))
-        self.assertTrue(np.array_equal(rois_num_stat, rois_num_dy))
+        np.testing.assert_array_equal(fpn_rois_stat, fpn_rois_dy)
+        np.testing.assert_array_equal(rois_num_stat, rois_num_dy)

     def test_collect_fpn_proposals_error(self):
@@ -932,7 +932,7 @@ class TestDistributeFpnProposals(LayerTest):
             output_dy_np.append(output_np)

         for res_stat, res_dy in zip(output_stat_np, output_dy_np):
-            self.assertTrue(np.array_equal(res_stat, res_dy))
+            np.testing.assert_array_equal(res_stat, res_dy)

     def test_distribute_fpn_proposals_error(self):
         program = Program()
...
@@ -71,11 +71,9 @@ class TestLoDTensor(unittest.TestCase):
                                          correct_recursive_seq_lens)
         self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT64)
         self.assertEqual(tensor.shape(), [5, 1])
-        self.assertTrue(
-            np.array_equal(
-                np.array(tensor),
-                np.array([1, 2, 3, 3,
-                          4]).reshape(tensor.shape()).astype('int64')))
+        np.testing.assert_array_equal(
+            np.array(tensor),
+            np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype('int64'))

         # Create LoDTensor from numpy array
         data = np.random.random([10, 1]).astype('float64')
@@ -85,7 +83,7 @@ class TestLoDTensor(unittest.TestCase):
                                         recursive_seq_lens)
         self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP64)
         self.assertEqual(tensor.shape(), [10, 1])
-        self.assertTrue(np.array_equal(np.array(tensor), data))
+        np.testing.assert_array_equal(np.array(tensor), data)

         # Create LoDTensor from another LoDTensor, they are differnt instances
         new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
@@ -133,9 +131,9 @@ class TestLoDTensor(unittest.TestCase):
         dltensor = tensor._to_dlpack()
         tensor_from_dlpack = fluid.core.from_dlpack(dltensor)
         self.assertTrue(isinstance(tensor_from_dlpack, fluid.core.Tensor))
-        self.assertTrue(
-            np.array_equal(np.array(tensor_from_dlpack),
-                           np.array([[1], [2], [3], [4]]).astype('int')))
+        np.testing.assert_array_equal(
+            np.array(tensor_from_dlpack),
+            np.array([[1], [2], [3], [4]]).astype('int'))
         # when build with cuda
         if core.is_compiled_with_cuda():
             gtensor = fluid.create_lod_tensor(
@@ -144,9 +142,9 @@ class TestLoDTensor(unittest.TestCase):
             gdltensor = gtensor._to_dlpack()
             gtensor_from_dlpack = fluid.core.from_dlpack(gdltensor)
             self.assertTrue(isinstance(gtensor_from_dlpack, fluid.core.Tensor))
-            self.assertTrue(
-                np.array_equal(np.array(gtensor_from_dlpack),
-                               np.array([[1], [2], [3], [4]]).astype('int')))
+            np.testing.assert_array_equal(
+                np.array(gtensor_from_dlpack),
+                np.array([[1], [2], [3], [4]]).astype('int'))

     def test_as_type(self):
         tensor = fluid.create_lod_tensor(
...
@@ -164,7 +164,7 @@ class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase):
             net = ShapeLayer()
             out = net(x)

-            self.assertTrue(np.array_equal(out.numpy(), x.numpy()))
+            np.testing.assert_array_equal(out.numpy(), x.numpy())


 class TestIfElseNoValue(unittest.TestCase):
...
@@ -36,7 +36,7 @@ class TestDeepCopy(unittest.TestCase):
         self.assertFalse(isinstance(net.forward, StaticFunction))
         self.assertTrue(id(copy_net), id(copy_net.forward.__self__))
-        self.assertTrue(np.array_equal(src_out.numpy(), copy_out.numpy()))
+        np.testing.assert_array_equal(src_out.numpy(), copy_out.numpy())

     def test_func(self):
         st_foo = paddle.jit.to_static(foo)
@@ -48,7 +48,7 @@ class TestDeepCopy(unittest.TestCase):
         new_foo = deepcopy(st_foo)
         self.assertFalse(isinstance(new_foo, StaticFunction))
         new_out = new_foo(x)
-        self.assertTrue(np.array_equal(st_out.numpy(), new_out.numpy()))
+        np.testing.assert_array_equal(st_out.numpy(), new_out.numpy())


 if __name__ == "__main__":
...
@@ -201,7 +201,7 @@ class TestPruneUnusedParamInProgram(unittest.TestCase):
         model.eval()
         input_ids = paddle.to_tensor(input_ids)
         out = model(input_ids)
-        self.assertTrue(np.array_equal(out.numpy(), [[15, 11]]))
+        np.testing.assert_array_equal(out.numpy(), [[15, 11]])


 if __name__ == '__main__':
...
@@ -82,7 +82,7 @@ class TestRollBackPlainFunction(unittest.TestCase):
         dy_out = st_foo(x)

         self.assertTrue(func_to_source_code(foo) == func_to_source_code(st_foo))
-        self.assertTrue(np.array_equal(st_out.numpy(), dy_out.numpy()))
+        np.testing.assert_array_equal(st_out.numpy(), dy_out.numpy())


 class TestRollBackNet(unittest.TestCase):
@@ -111,15 +111,15 @@ class TestRollBackNet(unittest.TestCase):
         self.assertFalse(isinstance(net.forward, StaticFunction))
         self.assertFalse("true_fn" in func_to_source_code(net.sub.forward))
         dy_fwd_out = net(x)
-        self.assertTrue(np.array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy()))
+        np.testing.assert_array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy())

         # rollback infer into original dygraph method
         net.infer.rollback()
         self.assertFalse(isinstance(net.infer, StaticFunction))
         self.assertFalse("true_fn" in func_to_source_code(net.sub.forward))
         dy_infer_out = net.infer(x)
-        self.assertTrue(
-            np.array_equal(st_infer_out.numpy(), dy_infer_out.numpy()))
+        np.testing.assert_array_equal(st_infer_out.numpy(),
+                                      dy_infer_out.numpy())


 if __name__ == "__main__":
...
@@ -208,8 +208,8 @@ class TestSliceSupplementSpecialCase(unittest.TestCase):
         out = exe.run(prog, feed={'x': array}, fetch_list=[z1, z2])

-        self.assertTrue(np.array_equal(out[0], array[::2]))
-        self.assertTrue(np.array_equal(out[1], array[::-2]))
+        np.testing.assert_array_equal(out[0], array[::2])
+        np.testing.assert_array_equal(out[1], array[::-2])

     def test_static_slice_step_dygraph2static(self):
         paddle.disable_static()
@@ -225,10 +225,10 @@ class TestSliceSupplementSpecialCase(unittest.TestCase):
                                  input_spec=[InputSpec(shape=[None, 4, 4])])
         static_result = sfunc(inps)

-        self.assertTrue(
-            np.array_equal(origin_result[0].numpy(), static_result[0].numpy()))
-        self.assertTrue(
-            np.array_equal(origin_result[1].numpy(), static_result[1].numpy()))
+        np.testing.assert_array_equal(origin_result[0].numpy(),
+                                      static_result[0].numpy())
+        np.testing.assert_array_equal(origin_result[1].numpy(),
+                                      static_result[1].numpy())


 class TestPaddleStridedSlice(unittest.TestCase):
@@ -268,10 +268,8 @@ class TestPaddleStridedSlice(unittest.TestCase):
                                   ends=e2,
                                   strides=stride2)

-        self.assertTrue(
-            np.array_equal(
-                sl.numpy(), array[s2[0]:e2[0]:stride2[0],
-                                  s2[1]:e2[1]:stride2[1]]))
+        np.testing.assert_array_equal(
+            sl.numpy(), array[s2[0]:e2[0]:stride2[0], s2[1]:e2[1]:stride2[1]])

         array = np.arange(6 * 7 * 8).reshape((6, 7, 8))
         pt = paddle.to_tensor(array)
@@ -285,9 +283,10 @@ class TestPaddleStridedSlice(unittest.TestCase):
                                   strides=stride2)
         array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]]

-        self.assertTrue(
-            np.array_equal(sl.numpy(), array_slice),
-            msg="paddle.strided_slice:\n {} \n numpy slice:\n{}".format(
+        np.testing.assert_array_equal(
+            sl.numpy(),
+            array_slice,
+            err_msg='paddle.strided_slice:\n {} \n numpy slice:\n{}'.format(
                 sl.numpy(), array_slice))
...
@@ -97,9 +97,9 @@ class TestCompatibility(unittest.TestCase):
         for x, y in zip(gt, res):
             if isinstance(x, list):
                 for tx, ty in zip(x, y):
-                    self.assertTrue(np.array_equal(tx, ty))
+                    np.testing.assert_array_equal(tx, ty)
             elif isinstance(x, np.ndarray):
-                self.assertTrue(np.array_equal(tx, ty))
+                np.testing.assert_array_equal(tx, ty)
             else:
                 raise Exception("Not Implement!")
...
@@ -261,7 +261,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
         res = self.run_new_executor(feed)
         gt = self.run_raw_executor(feed)
         for x, y in zip(gt, res):
-            self.assertTrue(np.array_equal(x, y))
+            np.testing.assert_array_equal(x, y)

     def test_with_error(self):
         feed = [{'a': np.ones([2, 2], dtype="float32")}]
@@ -277,7 +277,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
         res = self.run_new_executor(feed, use_compiled=True)
         gt = self.run_raw_executor(feed, use_compiled=True)
         for x, y in zip(gt, res):
-            self.assertTrue(np.array_equal(x, y))
+            np.testing.assert_array_equal(x, y)

     def test_compiled_program_convert_graph_to_program(self):
         data = np.ones([2, 2], dtype="float32")
@@ -286,7 +286,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
         res = self.run_new_executor(feed, use_compiled=True)
         gt = self.run_raw_executor(feed, use_compiled=True)
         for x, y in zip(gt, res):
-            self.assertTrue(np.array_equal(x, y))
+            np.testing.assert_array_equal(x, y)

     def test_empty_program(self):
         program = paddle.static.Program()
...
@@ -185,7 +185,7 @@ class TestScaleApiStatic(unittest.TestCase):
         exe = paddle.static.Executor(place=paddle.CPUPlace())
         out = exe.run(main_prog, feed={"x": input}, fetch_list=[out])
-        self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True)
+        np.testing.assert_array_equal(out[0], input * 2.0 + 3.0)


 class TestScaleInplaceApiStatic(TestScaleApiStatic):
@@ -204,7 +204,7 @@ class TestScaleApiDygraph(unittest.TestCase):
         input = np.random.random([2, 25]).astype("float32")
         x = paddle.to_tensor(input)
         out = self._executed_api(x, scale=2.0, bias=3.0)
-        self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True)
+        np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0)
         paddle.enable_static()
...
@@ -356,13 +356,13 @@ class TestMoveAxis(unittest.TestCase):
             exe = paddle.static.Executor()
             out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]

-        self.assertEqual(np.array_equal(out_np, expected), True)
+        np.testing.assert_array_equal(out_np, expected)

         paddle.disable_static()
         x = paddle.to_tensor(x_np)
         out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
         self.assertEqual(out.shape, [4, 2, 5, 7, 3])
-        self.assertEqual(np.array_equal(out.numpy(), expected), True)
+        np.testing.assert_array_equal(out.numpy(), expected)
         paddle.enable_static()

     def test_moveaxis2(self):
@@ -376,13 +376,13 @@ class TestMoveAxis(unittest.TestCase):
             exe = paddle.static.Executor()
             out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]

-        self.assertEqual(np.array_equal(out_np, expected), True)
+        np.testing.assert_array_equal(out_np, expected)

         paddle.disable_static()
         x = paddle.to_tensor(x_np)
         out = x.moveaxis(-2, -1)
         self.assertEqual(out.shape, [2, 5, 3])
-        self.assertEqual(np.array_equal(out.numpy(), expected), True)
+        np.testing.assert_array_equal(out.numpy(), expected)
         paddle.enable_static()

     def test_error(self):
...
@@ -1048,12 +1048,13 @@ class OpTest(unittest.TestCase):
                     str(expect_out) + "\n" + "But Got" + str(actual_out) +
                     " in class " + self.__class__.__name__)
             else:
-                self.assertTrue(
-                    np.array_equal(expect_out, actual_out),
-                    "Output (" + name + ") has diff at " + str(place) +
-                    " when using and not using inplace" + "\nExpect " +
-                    str(expect_out) + "\n" + "But Got" + str(actual_out) +
-                    " in class " + self.__class__.__name__ + '\n')
+                np.testing.assert_array_equal(
+                    expect_out,
+                    actual_out,
+                    err_msg='Output (' + name + ') has diff at ' + str(place) +
+                    ' when using and not using inplace' + '\nExpect ' +
+                    str(expect_out) + '\n' + 'But Got' + str(actual_out) +
+                    ' in class ' + self.__class__.__name__ + '\n')

     def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc,
                                              op_grad_to_var):
...
@@ -51,7 +51,7 @@ class TestSparseSquareOp(unittest.TestCase):

         # get and compare result
         result_array = np.array(out_selected_rows.get_tensor())
-        self.assertTrue(np.array_equal(result_array, np.square(np_array)))
+        np.testing.assert_array_equal(result_array, np.square(np_array))

     def test_sparse_acti(self):
         places = [core.CPUPlace()]
...
@@ -214,9 +214,9 @@ class TestAssignOApi(unittest.TestCase):
         y = clone_x**3
         y.backward()
-        self.assertTrue(np.array_equal(x, [1, 1]), True)
-        self.assertTrue(np.array_equal(clone_x.grad.numpy(), [3, 3]), True)
-        self.assertTrue(np.array_equal(x.grad.numpy(), [3, 3]), True)
+        np.testing.assert_array_equal(x, [1, 1])
+        np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
+        np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
         paddle.enable_static()
@@ -229,7 +229,7 @@ class TestAssignOApi(unittest.TestCase):
                         feed={'X': x_np},
                         fetch_list=[clone_x])[0]
-        self.assertTrue(np.array_equal(y_np, x_np), True)
+        np.testing.assert_array_equal(y_np, x_np)
         paddle.disable_static()
...
@@ -15,7 +15,7 @@
 from __future__ import print_function

 import unittest
-import numpy
+import numpy as np
 import op_test
 import paddle
@@ -39,7 +39,7 @@ class TestAssignValueOp(op_test.OpTest):
         self.outputs = {"Out": self.value}

     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.float32)
+        self.value = np.random.random(size=(2, 5)).astype(np.float32)
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]

     def test_forward(self):
@@ -49,22 +49,22 @@ class TestAssignValueOp(op_test.OpTest):
 class TestAssignValueOp2(TestAssignValueOp):

     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
+        self.value = np.random.random(size=(2, 5)).astype(np.int32)
         self.attrs["int32_values"] = [int(v) for v in self.value.flat]


 class TestAssignValueOp3(TestAssignValueOp):

     def init_data(self):
-        self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
+        self.value = np.random.random(size=(2, 5)).astype(np.int64)
         self.attrs["int64_values"] = [int(v) for v in self.value.flat]


 class TestAssignValueOp4(TestAssignValueOp):

     def init_data(self):
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.attrs["bool_values"] = [int(v) for v in self.value.flat]
@@ -72,7 +72,7 @@ class TestAssignApi(unittest.TestCase):

     def setUp(self):
         self.init_dtype()
-        self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
+        self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
             self.dtype)
         self.place = fluid.CUDAPlace(
             0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
@@ -88,8 +88,10 @@ class TestAssignApi(unittest.TestCase):
             exe = fluid.Executor(self.place)
             [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
-        self.assertTrue(numpy.array_equal(fetched_x, self.value),
-                        "fetch_x=%s val=%s" % (fetched_x, self.value))
+        np.testing.assert_array_equal(fetched_x,
+                                      self.value,
+                                      err_msg='fetch_x=%s val=%s' %
+                                      (fetched_x, self.value))
         self.assertEqual(fetched_x.dtype, self.value.dtype)
@@ -109,8 +111,8 @@ class TestAssignApi4(TestAssignApi):

     def setUp(self):
         self.init_dtype()
-        self.value = numpy.random.choice(a=[False, True],
-                                         size=(2, 5)).astype(numpy.bool)
+        self.value = np.random.choice(a=[False, True],
+                                      size=(2, 5)).astype(np.bool)
         self.place = fluid.CUDAPlace(
             0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
...
@@ -371,7 +371,7 @@ class TestBuffer(unittest.TestCase):
         self.func_test_buffer_state_dict()

     def assert_var_base_equal(self, var1, var2):
-        self.assertTrue(np.array_equal(var1.numpy(), var2.numpy()))
+        np.testing.assert_array_equal(var1.numpy(), var2.numpy())


 class BufferNetWithModification(paddle.nn.Layer):
@@ -414,8 +414,8 @@ class TestModifiedBuffer(unittest.TestCase):
         st_outs = self._run(True)

         for i in range(len(dy_outs)):
-            self.assertTrue(
-                np.array_equal(dy_outs[i].numpy(), st_outs[i].numpy()))
+            np.testing.assert_array_equal(dy_outs[i].numpy(),
+                                          st_outs[i].numpy())

     def test_modified(self):
         with _test_eager_guard():
...
@@ -91,9 +91,8 @@ class TestBeamSearchDecodeOp(unittest.TestCase):
         expected_data = np.array(
             [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64")
-        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
-        self.assertTrue(np.array_equal(np.array(sentence_scores),
-                                       expected_data))
+        np.testing.assert_array_equal(np.array(sentence_ids), expected_data)
+        np.testing.assert_array_equal(np.array(sentence_scores), expected_data)


 @unittest.skipIf(not core.is_compiled_with_cuda(),
...
@@ -90,7 +90,7 @@ class TestRandomValue(unittest.TestCase):
         self.assertEqual(np.sum(index1), 8582429431)
         self.assertEqual(np.sum(index2), 8581445798)
         expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.]
-        self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+        np.testing.assert_array_equal(y[16, 500, 500:510], expect)

         x = paddle.to_tensor(x_np, dtype='float32')
         y = paddle.bernoulli(x).numpy()
@@ -99,7 +99,7 @@ class TestRandomValue(unittest.TestCase):
         self.assertEqual(np.sum(index1), 8583509076)
         self.assertEqual(np.sum(index2), 8582778540)
         expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
-        self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
+        np.testing.assert_array_equal(y[16, 500, 500:510], expect)

         paddle.enable_static()
...
@@ -118,9 +118,11 @@ class InplaceTestBase(unittest.TestCase):
                     fetch_val2, = exe.run(compiled_prog,
                                           feed=feed_dict,
                                           fetch_list=[fetch_var])
-                    self.assertTrue(
-                        np.array_equal(fetch_val1, fetch_val2),
-                        "error var name: {}, fetch_val1: {}, fetch_val2: {}"
+                    np.testing.assert_array_equal(
+                        fetch_val1,
+                        fetch_val2,
+                        err_msg=
+                        'error var name: {}, fetch_val1: {}, fetch_val2: {}'
                         .format(
                             fetch_var,
                             fetch_val1[~np.equal(fetch_val1, fetch_val2)],
@@ -167,13 +169,14 @@ class InplaceTestBase(unittest.TestCase):
                     fetch_vals.append(fetch_val)

             for item in fetch_vals:
-                self.assertTrue(np.array_equal(fetch_vals[0], item))
-                self.assertTrue(
-                    np.array_equal(fetch_vals[0], item),
-                    "error var name: {}, fetch_vals[0]: {}, item: {}".
-                    format(fetch_var,
-                           fetch_vals[0][~np.equal(fetch_vals[0], item)],
-                           item[~np.equal(fetch_vals[0], item)]))
+                np.testing.assert_array_equal(fetch_vals[0], item)
+                np.testing.assert_array_equal(
+                    fetch_vals[0],
+                    item,
+                    err_msg='error var name: {}, fetch_vals[0]: {}, item: {}'
+                    .format(fetch_var,
+                            fetch_vals[0][~np.equal(fetch_vals[0], item)],
+                            item[~np.equal(fetch_vals[0], item)]))


 class CUDAInplaceTest(InplaceTestBase):
...
@@ -100,7 +100,7 @@ class TestGradientWithPrune(unittest.TestCase):
             out = exe.run(main,
                           feed={'x': np.ones([3]).astype('float32')},
                           fetch_list=[x1_grad])
-            self.assertTrue(np.array_equal(out[0], [2., 0., 0.]))
+            np.testing.assert_array_equal(out[0], [2.0, 0.0, 0.0])


 class TestDoubleGradient(unittest.TestCase):
...
@@ -130,11 +130,10 @@ class TestCastOpEager(unittest.TestCase):
             x = paddle.ones([2, 2], dtype="float16")
             x.stop_gradient = False
             out = paddle.cast(x, "float32")
-            self.assertTrue(
-                np.array_equal(out,
-                               np.ones([2, 2]).astype("float32")))
+            np.testing.assert_array_equal(out,
+                                          np.ones([2, 2]).astype('float32'))
             out.backward()
-            self.assertTrue(np.array_equal(x.gradient(), x.numpy()))
+            np.testing.assert_array_equal(x.gradient(), x.numpy())
             self.assertTrue(x.gradient().dtype == np.float16)
...
@@ -68,7 +68,7 @@ class TestCompiledProgram(unittest.TestCase):
                                      "label": self.label
                                  },
                                  fetch_list=[loss.name])
-            self.assertTrue(np.array_equal(loss_data[0], self.loss))
+            np.testing.assert_array_equal(loss_data[0], self.loss)

     def test_compiled_program_with_data_parallel(self):
         with new_program_scope():
@@ -90,7 +90,7 @@ class TestCompiledProgram(unittest.TestCase):
                                      "label": self.label
                                  },
                                  fetch_list=[loss.name])
-            self.assertTrue(np.array_equal(loss_data[0], self.loss))
+            np.testing.assert_array_equal(loss_data[0], self.loss)


 class TestCompiledProgramError(unittest.TestCase):
...
@@ -447,10 +447,8 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
         self.assertTrue(self.out_var.shape[self.axis] == -1)
         exe = fluid.Executor(self.place)
         res = exe.run(self.program, fetch_list=self.out_var)
-        self.assertTrue(
-            np.array_equal(
-                res[0], np.concatenate([self.x] * self.iter_num,
-                                       axis=self.axis)))
+        np.testing.assert_array_equal(
+            res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))


 if __name__ == '__main__':
...
@@ -84,7 +84,7 @@ class TestComplexConjOp(unittest.TestCase):
                 var_x = paddle.to_tensor(input)
                 result = paddle.conj(var_x).numpy()
                 target = np.conj(input)
-                self.assertTrue(np.array_equal(result, target))
+                np.testing.assert_array_equal(result, target)

     def test_conj_operator(self):
         for dtype in self._dtypes:
@@ -96,7 +96,7 @@ class TestComplexConjOp(unittest.TestCase):
                 var_x = paddle.to_tensor(input)
                 result = var_x.conj().numpy()
                 target = np.conj(input)
-                self.assertTrue(np.array_equal(result, target))
+                np.testing.assert_array_equal(result, target)

     def test_conj_static_mode(self):
@@ -118,7 +118,7 @@ class TestComplexConjOp(unittest.TestCase):
                 exe = static.Executor(place)
                 out_value = exe.run(feed=input_dict, fetch_list=[out.name])
-                self.assertTrue(np.array_equal(np_res, out_value[0]))
+                np.testing.assert_array_equal(np_res, out_value[0])

     def test_conj_api_real_number(self):
         for dtype in self._dtypes:
@@ -128,7 +128,7 @@ class TestComplexConjOp(unittest.TestCase):
                 var_x = paddle.to_tensor(input)
                 result = paddle.conj(var_x).numpy()
                 target = np.conj(input)
-                self.assertTrue(np.array_equal(result, target))
+                np.testing.assert_array_equal(result, target)


 if __name__ == "__main__":
...
@@ -174,10 +174,10 @@ class TestCUDAGraph(unittest.TestCase):
         y_np = y.numpy()
         y_np_expected = np.concatenate(xs_np)
-        self.assertTrue(np.array_equal(y_np, y_np_expected))
+        np.testing.assert_array_equal(y_np, y_np_expected)
         self.assertEqual(len(zs), len(xs_np))
         for i, z in enumerate(zs):
-            self.assertTrue(np.array_equal(z.numpy(), xs_np[i]))
+            np.testing.assert_array_equal(z.numpy(), xs_np[i])

         output_dir = 'cuda_graph_dot_{}'.format(os.getpid())
         try:
@@ -233,8 +233,8 @@ class TestCUDAGraph(unittest.TestCase):
             graph.replay()
             actual_x = np.array([[i]]).astype(dtype)
             actual_y = np.array([[i * i]]).astype(dtype)
-            self.assertTrue(np.array_equal(actual_x, x.numpy()))
-            self.assertTrue(np.array_equal(actual_y, y.numpy()))
+            np.testing.assert_array_equal(actual_x, x.numpy())
+            np.testing.assert_array_equal(actual_y, y.numpy())

     def test_dev_ctx_alloc(self):
         if not can_use_cuda_graph():
...
@@ -68,9 +68,9 @@ class TestSimpleModel(unittest.TestCase):
         layer, value2 = self.run_base(func, True, "default")
         _, value3 = self.run_base(func, True, "new")
         _, value4 = self.run_base(func, True, layer)
-        self.assertTrue(np.array_equal(value1, value2))
-        self.assertTrue(np.array_equal(value1, value3))
-        self.assertTrue(np.array_equal(value1, value4))
+        np.testing.assert_array_equal(value1, value2)
+        np.testing.assert_array_equal(value1, value3)
+        np.testing.assert_array_equal(value1, value4)

     def test_layer(self):
         self.check(SimpleModel(10, 20))
...
@@ -128,7 +128,7 @@ class TestStreamGuard(unittest.TestCase):
         #  kernels to be completed on windows.
         s.synchronize()

-        self.assertTrue(np.array_equal(np.array(c), np.array(d)))
+        np.testing.assert_array_equal(np.array(c), np.array(d))

     def test_stream_guard_default_stream(self):
         if paddle.is_compiled_with_cuda():
...
@@ -31,15 +31,15 @@ class TestCumsumOp(unittest.TestCase):
         y = paddle.cumsum(data)
         z = np.cumsum(data_np)
-        self.assertTrue(np.array_equal(z, y.numpy()))
+        np.testing.assert_array_equal(z, y.numpy())

         y = paddle.cumsum(data, axis=0)
         z = np.cumsum(data_np, axis=0)
-        self.assertTrue(np.array_equal(z, y.numpy()))
+        np.testing.assert_array_equal(z, y.numpy())

         y = paddle.cumsum(data, axis=-1)
         z = np.cumsum(data_np, axis=-1)
-        self.assertTrue(np.array_equal(z, y.numpy()))
+        np.testing.assert_array_equal(z, y.numpy())

         y = paddle.cumsum(data, dtype='float64')
         self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)
@@ -49,7 +49,7 @@ class TestCumsumOp(unittest.TestCase):
         y = paddle.cumsum(data, axis=-2)
         z = np.cumsum(data_np, axis=-2)
-        self.assertTrue(np.array_equal(z, y.numpy()))
+        np.testing.assert_array_equal(z, y.numpy())

     def run_static(self, use_gpu=False):
         with fluid.program_guard(fluid.Program()):
...
@@ -93,8 +93,8 @@ class TestClass(unittest.TestCase):
                     L1 = np.array(L1)
                     L2 = np.array(L2)
-                    self.assertTrue(np.array_equal(I1, I2))
-                    self.assertTrue(np.array_equal(L1, L2))
+                    np.testing.assert_array_equal(I1, I2)
+                    np.testing.assert_array_equal(L1, L2)

                     batch_id += 1
                     if break_beforehand and batch_id >= int(
...
...@@ -138,9 +138,8 @@ class TestDygraph(unittest.TestCase): ...@@ -138,9 +138,8 @@ class TestDygraph(unittest.TestCase):
np.array([[1, 3], [3, 5]]).astype(np.float32)) np.array([[1, 3], [3, 5]]).astype(np.float32))
y1 = fluid.dygraph.to_variable( y1 = fluid.dygraph.to_variable(
np.array([[2, 5], [6, 8]]).astype(np.float32)) np.array([[2, 5], [6, 8]]).astype(np.float32))
self.assertTrue( np.testing.assert_array_equal(
np.array_equal( paddle.dot(x1, y1).numpy(), np.array([[17], [58]]))
paddle.dot(x1, y1).numpy(), np.array([[17], [58]])))
class TestComplexDotOp(OpTest): class TestComplexDotOp(OpTest):
......
...@@ -1013,10 +1013,9 @@ class TestDropoutBackward(unittest.TestCase): ...@@ -1013,10 +1013,9 @@ class TestDropoutBackward(unittest.TestCase):
out, mask = core.ops.dropout(input, 'dropout_prob', 0.5) out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
out.backward() out.backward()
self.assertTrue( np.testing.assert_array_equal(
np.array_equal( input.gradient(),
input.gradient(), self.cal_grad_downscale_in_infer(mask.numpy()))
self.cal_grad_downscale_in_infer(mask.numpy())))
def test_backward_downscale_in_infer_eager(self): def test_backward_downscale_in_infer_eager(self):
for place in self.places: for place in self.places:
...@@ -1027,10 +1026,9 @@ class TestDropoutBackward(unittest.TestCase): ...@@ -1027,10 +1026,9 @@ class TestDropoutBackward(unittest.TestCase):
out, mask = _C_ops.final_state_dropout( out, mask = _C_ops.final_state_dropout(
input, None, 0.5, False, "downgrade_in_infer", 0, False) input, None, 0.5, False, "downgrade_in_infer", 0, False)
out.backward() out.backward()
self.assertTrue( np.testing.assert_array_equal(
np.array_equal( input.gradient(),
input.gradient(), self.cal_grad_downscale_in_infer(mask.numpy()))
self.cal_grad_downscale_in_infer(mask.numpy())))
def test_backward_upscale_train(self): def test_backward_upscale_train(self):
_enable_legacy_dygraph() _enable_legacy_dygraph()
......
...@@ -82,7 +82,7 @@ class TestDynRNNStopGradient(unittest.TestCase): ...@@ -82,7 +82,7 @@ class TestDynRNNStopGradient(unittest.TestCase):
value2 = build_and_run_program(place, self.batch_size, value2 = build_and_run_program(place, self.batch_size,
self.beam_size, True) self.beam_size, True)
self.assertTrue(np.array_equal(value1, value2)) np.testing.assert_array_equal(value1, value2)
def test_check_main(self): def test_check_main(self):
places = [fluid.CPUPlace()] places = [fluid.CPUPlace()]
......
...@@ -108,11 +108,11 @@ class TestRunProgram(unittest.TestCase): ...@@ -108,11 +108,11 @@ class TestRunProgram(unittest.TestCase):
loss = paddle.mean(out_t) loss = paddle.mean(out_t)
loss.backward() loss.backward()
self.assertTrue(np.array_equal(np.ones([2, 2]) * 4, out_t.numpy())) np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy())) np.ones([2, 4]) * 0.5, x_t.grad.numpy())
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy())) np.ones([4, 2]) * 0.5, y_t.grad.numpy())
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -32,7 +32,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): ...@@ -32,7 +32,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
out_arr = out.numpy() out_arr = out.numpy()
out_arr_expected = np.add(np_x, np_y) out_arr_expected = np.add(np_x, np_y)
self.assertTrue(np.array_equal(out_arr, out_arr_expected)) np.testing.assert_array_equal(out_arr, out_arr_expected)
def test_sum(self): def test_sum(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -42,7 +42,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): ...@@ -42,7 +42,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
out = paddle.sum(x, axis=0) out = paddle.sum(x, axis=0)
out_arr = out.numpy() out_arr = out.numpy()
out_arr_expected = np.sum(x_data, axis=0) out_arr_expected = np.sum(x_data, axis=0)
self.assertTrue(np.array_equal(out_arr, out_arr_expected)) np.testing.assert_array_equal(out_arr, out_arr_expected)
def test_mm(self): def test_mm(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -40,28 +40,28 @@ class EagerStringTensorTestCase(unittest.TestCase): ...@@ -40,28 +40,28 @@ class EagerStringTensorTestCase(unittest.TestCase):
ST2 = core.eager.StringTensor(shape, "ST2") # constructor 2 ST2 = core.eager.StringTensor(shape, "ST2") # constructor 2
self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.name, "ST2")
self.assertEqual(ST2.shape, shape) self.assertEqual(ST2.shape, shape)
self.assertTrue( np.testing.assert_array_equal(ST2.numpy(),
np.array_equal(ST2.numpy(), np.empty(shape, dtype=np.unicode_))) np.empty(shape, dtype=np.unicode_))
ST3 = core.eager.StringTensor(self.str_arr, "ST3") # constructor 3 ST3 = core.eager.StringTensor(self.str_arr, "ST3") # constructor 3
self.assertEqual(ST3.name, "ST3") self.assertEqual(ST3.name, "ST3")
self.assertEqual(ST3.shape, list(self.str_arr.shape)) self.assertEqual(ST3.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr)) np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
ST4 = core.eager.StringTensor(self.str_arr) # constructor 4 ST4 = core.eager.StringTensor(self.str_arr) # constructor 4
self.assertEqual(ST4.name, "generated_string_tensor_1") self.assertEqual(ST4.name, "generated_string_tensor_1")
self.assertEqual(ST4.shape, list(self.str_arr.shape)) self.assertEqual(ST4.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr)) np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
ST5 = core.eager.StringTensor(ST4) # constructor 5 ST5 = core.eager.StringTensor(ST4) # constructor 5
self.assertEqual(ST5.name, "generated_string_tensor_2") self.assertEqual(ST5.name, "generated_string_tensor_2")
self.assertEqual(ST5.shape, list(self.str_arr.shape)) self.assertEqual(ST5.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST5.numpy(), self.str_arr)) np.testing.assert_array_equal(ST5.numpy(), self.str_arr)
ST6 = core.eager.StringTensor(ST5, "ST6") # constructor 6 ST6 = core.eager.StringTensor(ST5, "ST6") # constructor 6
self.assertEqual(ST6.name, "ST6") self.assertEqual(ST6.name, "ST6")
self.assertEqual(ST6.shape, list(self.str_arr.shape)) self.assertEqual(ST6.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST6.numpy(), self.str_arr)) np.testing.assert_array_equal(ST6.numpy(), self.str_arr)
for st in [ST1, ST2, ST3, ST4, ST5, ST6]: for st in [ST1, ST2, ST3, ST4, ST5, ST6]:
# All StringTensors are on the CPU place so far. # All StringTensors are on the CPU place so far.
...@@ -74,25 +74,25 @@ class EagerStringTensorTestCase(unittest.TestCase): ...@@ -74,25 +74,25 @@ class EagerStringTensorTestCase(unittest.TestCase):
name="ST1") # constructor 2 name="ST1") # constructor 2
self.assertEqual(ST1.name, "ST1") self.assertEqual(ST1.name, "ST1")
self.assertEqual(ST1.shape, shape) self.assertEqual(ST1.shape, shape)
self.assertTrue( np.testing.assert_array_equal(ST1.numpy(),
np.array_equal(ST1.numpy(), np.empty(shape, dtype=np.unicode_))) np.empty(shape, dtype=np.unicode_))
ST2 = core.eager.StringTensor(self.str_arr, ST2 = core.eager.StringTensor(self.str_arr,
name="ST2") # constructor 3 name="ST2") # constructor 3
self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.name, "ST2")
self.assertEqual(ST2.shape, list(self.str_arr.shape)) self.assertEqual(ST2.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST2.numpy(), self.str_arr)) np.testing.assert_array_equal(ST2.numpy(), self.str_arr)
ST3 = core.eager.StringTensor(ST2, name="ST3") # constructor 6 ST3 = core.eager.StringTensor(ST2, name="ST3") # constructor 6
self.assertEqual(ST3.name, "ST3") self.assertEqual(ST3.name, "ST3")
self.assertEqual(ST3.shape, list(self.str_arr.shape)) self.assertEqual(ST3.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr)) np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
ST4 = core.eager.StringTensor(value=ST2, ST4 = core.eager.StringTensor(value=ST2,
name="ST4") # constructor 6 name="ST4") # constructor 6
self.assertEqual(ST4.name, "ST4") self.assertEqual(ST4.name, "ST4")
self.assertEqual(ST4.shape, list(self.str_arr.shape)) self.assertEqual(ST4.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr)) np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
for st in [ST1, ST2, ST3, ST4]: for st in [ST1, ST2, ST3, ST4]:
# All StringTensors are on the CPU place so far. # All StringTensors are on the CPU place so far.
self.assertTrue(st.place._equals(core.CPUPlace())) self.assertTrue(st.place._equals(core.CPUPlace()))
......
...@@ -206,11 +206,15 @@ class TestElementwiseMinOpFP16(unittest.TestCase): ...@@ -206,11 +206,15 @@ class TestElementwiseMinOpFP16(unittest.TestCase):
z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place, z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place,
False) False)
z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True) z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True)
self.assertTrue(np.array_equal(z_1, z_2), "{} vs {}".format(z_1, z_2)) np.testing.assert_array_equal(z_1,
self.assertTrue(np.array_equal(x_g_1, x_g_2), z_2,
"{} vs {}".format(x_g_1, x_g_2)) err_msg='{} vs {}'.format(z_1, z_2))
self.assertTrue(np.array_equal(y_g_1, y_g_2), np.testing.assert_array_equal(x_g_1,
"{} vs {}".format(y_g_1, y_g_2)) x_g_2,
err_msg='{} vs {}'.format(x_g_1, x_g_2))
np.testing.assert_array_equal(y_g_1,
y_g_2,
err_msg='{} vs {}'.format(y_g_1, y_g_2))
def test_main(self): def test_main(self):
self.check_main((13, 17), (13, 17)) self.check_main((13, 17), (13, 17))
......
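In hunks like the one above, the old second argument to `self.assertTrue` becomes the `err_msg` keyword of `np.testing.assert_array_equal`. A short sketch (plain NumPy; `z_1`/`z_2` are illustrative values, not the test's) of how `err_msg` is combined with the built-in mismatch report rather than replacing it:

import numpy as np

z_1 = np.array([1.0, 2.0])
z_2 = np.array([1.0, 2.5])

try:
    np.testing.assert_array_equal(z_1, z_2,
                                  err_msg='{} vs {}'.format(z_1, z_2))
except AssertionError as e:
    print(e)  # numpy's mismatch report plus the custom "[1. 2.] vs [1. 2.5]" message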
...@@ -210,9 +210,9 @@ class TestElementwisePowGradOpInt(unittest.TestCase): ...@@ -210,9 +210,9 @@ class TestElementwisePowGradOpInt(unittest.TestCase):
y.stop_gradient = False y.stop_gradient = False
res = x**y res = x**y
res.backward() res.backward()
self.assertTrue(np.array_equal(res.gradient(), self.grad_res)) np.testing.assert_array_equal(res.gradient(), self.grad_res)
self.assertTrue(np.array_equal(x.gradient(), self.grad_x)) np.testing.assert_array_equal(x.gradient(), self.grad_x)
self.assertTrue(np.array_equal(y.gradient(), self.grad_y)) np.testing.assert_array_equal(y.gradient(), self.grad_y)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
...@@ -36,7 +36,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): ...@@ -36,7 +36,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
for p in self.get_places(): for p in self.get_places():
grad_value1 = self.run_program(p, stop_gradient=False) grad_value1 = self.run_program(p, stop_gradient=False)
grad_value2 = self.run_program(p, stop_gradient=True) grad_value2 = self.run_program(p, stop_gradient=True)
self.assertTrue(np.array_equal(grad_value1, grad_value2)) np.testing.assert_array_equal(grad_value1, grad_value2)
def run_program(self, place, stop_gradient=False): def run_program(self, place, stop_gradient=False):
np.random.seed(1) np.random.seed(1)
......
...@@ -50,7 +50,7 @@ class TestCheckFetchList(unittest.TestCase): ...@@ -50,7 +50,7 @@ class TestCheckFetchList(unittest.TestCase):
fetch_list=[self.fetch_list], # support single list/tuple fetch_list=[self.fetch_list], # support single list/tuple
return_numpy=True) return_numpy=True)
self.assertTrue(np.array_equal(res[0], self.expected)) np.testing.assert_array_equal(res[0], self.expected)
def test_with_error(self): def test_with_error(self):
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
......
...@@ -65,7 +65,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): ...@@ -65,7 +65,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest):
add_out1 = np.array(add_out[0]) add_out1 = np.array(add_out[0])
mul_out = self.calc_mul_out(place, parallel) mul_out = self.calc_mul_out(place, parallel)
add_out2 = np.array(add_out[0]) add_out2 = np.array(add_out[0])
self.assertTrue(np.array_equal(add_out1, add_out2)) np.testing.assert_array_equal(add_out1, add_out2)
class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase):
...@@ -108,7 +108,7 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase):
add_out1 = np.array(add_out[0]) add_out1 = np.array(add_out[0])
sub_out = self.calc_sub_out(place, parallel) sub_out = self.calc_sub_out(place, parallel)
add_out2 = np.array(add_out[0]) add_out2 = np.array(add_out[0])
self.assertTrue(np.array_equal(add_out1, add_out2)) np.testing.assert_array_equal(add_out1, add_out2)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -272,10 +272,8 @@ class TestExpandDygraphAPI(unittest.TestCase): ...@@ -272,10 +272,8 @@ class TestExpandDygraphAPI(unittest.TestCase):
c = paddle.fluid.layers.expand(a, c = paddle.fluid.layers.expand(a,
expand_times=paddle.to_tensor( expand_times=paddle.to_tensor(
[2, 3], dtype='int32')) [2, 3], dtype='int32'))
self.assertTrue( np.testing.assert_array_equal(b.numpy(), np.tile(a.numpy(), [2, 3]))
np.array_equal(b.numpy(), np.tile(a.numpy(), [2, 3]))) np.testing.assert_array_equal(c.numpy(), np.tile(a.numpy(), [2, 3]))
self.assertTrue(
np.array_equal(c.numpy(), np.tile(a.numpy(), [2, 3])))
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -277,11 +277,11 @@ class TestExpandV2DygraphAPI(unittest.TestCase): ...@@ -277,11 +277,11 @@ class TestExpandV2DygraphAPI(unittest.TestCase):
np_array = np.array([2, 5]) np_array = np.array([2, 5])
expand_2 = paddle.expand(a, shape=np_array) expand_2 = paddle.expand(a, shape=np_array)
self.assertTrue( np.testing.assert_array_equal(egr_expand_1.numpy(),
np.array_equal(egr_expand_1.numpy(), egr_expand_2.numpy())) egr_expand_2.numpy())
self.assertTrue(np.array_equal(expand_1.numpy(), expand_2.numpy())) np.testing.assert_array_equal(expand_1.numpy(), expand_2.numpy())
self.assertTrue( np.testing.assert_array_equal(expand_1.numpy(),
np.array_equal(expand_1.numpy(), egr_expand_1.numpy())) egr_expand_1.numpy())
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -94,7 +94,7 @@ class TestExponentialAPI(unittest.TestCase): ...@@ -94,7 +94,7 @@ class TestExponentialAPI(unittest.TestCase):
self.assertTrue(np.min(y.numpy()) >= 0) self.assertTrue(np.min(y.numpy()) >= 0)
y.backward() y.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10]))) np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10]))
paddle.enable_static() paddle.enable_static()
def test_fixed_random_number(self): def test_fixed_random_number(self):
......
...@@ -173,7 +173,7 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): ...@@ -173,7 +173,7 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
res_1 = run_program(-1) res_1 = run_program(-1)
res_2 = run_program(2) res_2 = run_program(2)
self.assertTrue(np.array_equal(res_1, res_2)) np.testing.assert_array_equal(res_1, res_2)
class TestFCOpError(unittest.TestCase): class TestFCOpError(unittest.TestCase):
......
...@@ -17,14 +17,14 @@ from __future__ import print_function ...@@ -17,14 +17,14 @@ from __future__ import print_function
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
import op_test import op_test
import numpy import numpy as np
import unittest import unittest
class TestFetchVar(unittest.TestCase): class TestFetchVar(unittest.TestCase):
def set_input(self): def set_input(self):
self.val = numpy.array([1, 3, 5]).astype(numpy.int32) self.val = np.array([1, 3, 5]).astype(np.int32)
def test_fetch_var(self): def test_fetch_var(self):
self.set_input() self.set_input()
...@@ -33,15 +33,17 @@ class TestFetchVar(unittest.TestCase): ...@@ -33,15 +33,17 @@ class TestFetchVar(unittest.TestCase):
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) exe.run(fluid.default_main_program(), feed={}, fetch_list=[])
fetched_x = fluid.executor._fetch_var("x") fetched_x = fluid.executor._fetch_var("x")
self.assertTrue(numpy.array_equal(fetched_x, self.val), np.testing.assert_array_equal(fetched_x,
"fetch_x=%s val=%s" % (fetched_x, self.val)) self.val,
err_msg='fetch_x=%s val=%s' %
(fetched_x, self.val))
self.assertEqual(fetched_x.dtype, self.val.dtype) self.assertEqual(fetched_x.dtype, self.val.dtype)
class TestFetchNullVar(TestFetchVar): class TestFetchNullVar(TestFetchVar):
def set_input(self): def set_input(self):
self.val = numpy.array([]).astype(numpy.int32) self.val = np.array([]).astype(np.int32)
if __name__ == '__main__': if __name__ == '__main__':
......
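The `test_fetch_var` hunk above keeps its separate `assertEqual` on dtype, and that is deliberate: by default `np.testing.assert_array_equal` compares values only. A quick sketch (plain NumPy assumption) of the behavior:

import numpy as np

# Passes: the values match element-wise even though the dtypes differ.
np.testing.assert_array_equal(np.array([1, 3, 5], dtype=np.int32),
                              np.array([1.0, 3.0, 5.0]))

# Hence the explicit dtype check stays in the test:
val = np.array([1, 3, 5]).astype(np.int32)
assert val.dtype == np.dtype('int32')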
...@@ -110,7 +110,7 @@ class TestFillAnyInplace(unittest.TestCase): ...@@ -110,7 +110,7 @@ class TestFillAnyInplace(unittest.TestCase):
y = 2 * x y = 2 * x
y.fill_(1) y.fill_(1)
y.backward() y.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10]))) np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10]))
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -127,7 +127,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase): ...@@ -127,7 +127,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase):
result_array = np.array(out.get_tensor()) result_array = np.array(out.get_tensor())
full_array = np.full((123, 92), 3.8, 'float32') full_array = np.full((123, 92), 3.8, 'float32')
self.assertTrue(np.array_equal(result_array, full_array)) np.testing.assert_array_equal(result_array, full_array)
def test_fill_constant_with_selected_rows(self): def test_fill_constant_with_selected_rows(self):
places = [core.CPUPlace()] places = [core.CPUPlace()]
......
...@@ -78,7 +78,7 @@ class TestFillOp3(unittest.TestCase): ...@@ -78,7 +78,7 @@ class TestFillOp3(unittest.TestCase):
result_array = np.array(out) result_array = np.array(out)
full_array = np.array(val, 'float32') full_array = np.array(val, 'float32')
self.assertTrue(np.array_equal(result_array, full_array)) np.testing.assert_array_equal(result_array, full_array)
def test_fill_op(self): def test_fill_op(self):
places = [core.CPUPlace()] places = [core.CPUPlace()]
......
...@@ -97,19 +97,19 @@ class TestFusedMatmulBias(unittest.TestCase): ...@@ -97,19 +97,19 @@ class TestFusedMatmulBias(unittest.TestCase):
z = fused_matmul_bias(x, y, bias, trans_x, trans_y) z = fused_matmul_bias(x, y, bias, trans_x, trans_y)
z_np = matmul(x_np, y_np, bias_np, trans_x, trans_y) z_np = matmul(x_np, y_np, bias_np, trans_x, trans_y)
self.assertTrue(np.array_equal(z.numpy(), z_np)) np.testing.assert_array_equal(z.numpy(), z_np)
z_grad_np = self.rand_data(z_np.shape, dtype) z_grad_np = self.rand_data(z_np.shape, dtype)
paddle.autograd.backward(z, grad_tensors=[paddle.to_tensor(z_grad_np)]) paddle.autograd.backward(z, grad_tensors=[paddle.to_tensor(z_grad_np)])
x_grad_np, y_grad_np, bias_grad_np = matmul_grad( x_grad_np, y_grad_np, bias_grad_np = matmul_grad(
x_np, y_np, bias_np, z_grad_np, trans_x, trans_y) x_np, y_np, bias_np, z_grad_np, trans_x, trans_y)
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_np)) np.testing.assert_array_equal(x.grad.numpy(), x_grad_np)
self.assertEqual(y_grad_np.shape, y_np.shape) self.assertEqual(y_grad_np.shape, y_np.shape)
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_np)) np.testing.assert_array_equal(y.grad.numpy(), y_grad_np)
if need_bias: if need_bias:
self.assertTrue(np.array_equal(bias.grad.numpy(), bias_grad_np)) np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np)
else: else:
self.assertTrue(bias_grad_np is None) self.assertTrue(bias_grad_np is None)
...@@ -138,7 +138,7 @@ class TestFusedLinear(unittest.TestCase): ...@@ -138,7 +138,7 @@ class TestFusedLinear(unittest.TestCase):
linear = FusedLinear(40, 50, transpose_weight=transpose) linear = FusedLinear(40, 50, transpose_weight=transpose)
y1 = linear(x) y1 = linear(x)
y2 = fused_linear(x, linear.weight, linear.bias, transpose) y2 = fused_linear(x, linear.weight, linear.bias, transpose)
self.assertTrue(np.array_equal(y1.numpy(), y2.numpy())) np.testing.assert_array_equal(y1.numpy(), y2.numpy())
def test_non_transpose(self): def test_non_transpose(self):
self.check_fused_linear(False) self.check_fused_linear(False)
......
...@@ -341,7 +341,7 @@ class API_TestDygraphGather(unittest.TestCase): ...@@ -341,7 +341,7 @@ class API_TestDygraphGather(unittest.TestCase):
gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0] gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0]
return gpu_value return gpu_value
self.assertTrue(np.array_equal(test_dygraph(), test_static_graph())) np.testing.assert_array_equal(test_dygraph(), test_static_graph())
class TestGathertError(unittest.TestCase): class TestGathertError(unittest.TestCase):
......
...@@ -324,8 +324,8 @@ class TestAmpScaler(unittest.TestCase): ...@@ -324,8 +324,8 @@ class TestAmpScaler(unittest.TestCase):
for param in model.parameters(): for param in model.parameters():
# param is not updated when the tensor contains nan or inf # param is not updated when the tensor contains nan or inf
self.assertTrue( np.testing.assert_array_equal(param.numpy(),
np.array_equal(param.numpy(), params_init[param.name])) params_init[param.name])
def test_nan_inf(self): def test_nan_inf(self):
self.nan_inf() self.nan_inf()
...@@ -974,7 +974,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): ...@@ -974,7 +974,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
fetch_list=fetch_targets) fetch_list=fetch_targets)
print("pred.numpy()", pred.numpy()) print("pred.numpy()", pred.numpy())
print("result", results[0]) print("result", results[0])
self.assertTrue(np.array_equal(pred.numpy(), results[0])) np.testing.assert_array_equal(pred.numpy(), results[0])
paddle.disable_static() paddle.disable_static()
def test_inference_save_load(self): def test_inference_save_load(self):
......
...@@ -323,8 +323,8 @@ class TestAmpScaler(unittest.TestCase): ...@@ -323,8 +323,8 @@ class TestAmpScaler(unittest.TestCase):
for param in model.parameters(): for param in model.parameters():
# param is not updated when the tensor contains nan or inf # param is not updated when the tensor contains nan or inf
self.assertTrue( np.testing.assert_array_equal(param.numpy(),
np.array_equal(param.numpy(), params_init[param.name])) params_init[param.name])
def test_nan_inf(self): def test_nan_inf(self):
self.nan_inf() self.nan_inf()
...@@ -965,7 +965,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): ...@@ -965,7 +965,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
fetch_list=fetch_targets) fetch_list=fetch_targets)
print("pred.numpy()", pred.numpy()) print("pred.numpy()", pred.numpy())
print("result", results[0]) print("result", results[0])
self.assertTrue(np.array_equal(pred.numpy(), results[0])) np.testing.assert_array_equal(pred.numpy(), results[0])
paddle.disable_static() paddle.disable_static()
def test_inference_save_load(self): def test_inference_save_load(self):
......
...@@ -314,8 +314,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -314,8 +314,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
learning_rate=0.003, learning_rate=0.003,
parameter_list=(linear.parameters() + linear2.parameters())) parameter_list=(linear.parameters() + linear2.parameters()))
optimizer.minimize(out2) optimizer.minimize(out2)
self.assertTrue( np.testing.assert_array_equal(linear2_origin,
np.array_equal(linear2_origin, linear2.weight.numpy())) linear2.weight.numpy())
self.assertFalse( self.assertFalse(
np.array_equal(linear_origin, linear.weight.numpy())) np.array_equal(linear_origin, linear.weight.numpy()))
...@@ -344,10 +344,9 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -344,10 +344,9 @@ class TestImperativeAutoPrune(unittest.TestCase):
learning_rate=0.003, learning_rate=0.003,
parameter_list=(linear.parameters() + linear2.parameters())) parameter_list=(linear.parameters() + linear2.parameters()))
optimizer.minimize(out2) optimizer.minimize(out2)
self.assertTrue( np.testing.assert_array_equal(linear2_origin,
np.array_equal(linear2_origin, linear2.weight.numpy())) linear2.weight.numpy())
self.assertTrue(np.array_equal(linear_origin, np.testing.assert_array_equal(linear_origin, linear.weight.numpy())
linear.weight.numpy()))
try: try:
linear2.weight.gradient() linear2.weight.gradient()
except ValueError as e: except ValueError as e:
......
...@@ -159,8 +159,8 @@ class TestImperative(unittest.TestCase): ...@@ -159,8 +159,8 @@ class TestImperative(unittest.TestCase):
out.backward() out.backward()
dy_grad2 = mlp._linear1.weight.gradient() dy_grad2 = mlp._linear1.weight.gradient()
self.assertFalse(fluid.dygraph.enabled()) self.assertFalse(fluid.dygraph.enabled())
self.assertTrue(np.array_equal(dy_out1, dy_out2)) np.testing.assert_array_equal(dy_out1, dy_out2)
self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) np.testing.assert_array_equal(dy_grad1, dy_grad2)
def test_functional_dygraph_context(self): def test_functional_dygraph_context(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -190,8 +190,8 @@ class TestImperative(unittest.TestCase): ...@@ -190,8 +190,8 @@ class TestImperative(unittest.TestCase):
dy_grad2 = mlp._linear1.weight.gradient() dy_grad2 = mlp._linear1.weight.gradient()
paddle.enable_static() paddle.enable_static()
self.assertFalse(paddle.in_dynamic_mode()) self.assertFalse(paddle.in_dynamic_mode())
self.assertTrue(np.array_equal(dy_out1, dy_out2)) np.testing.assert_array_equal(dy_out1, dy_out2)
self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) np.testing.assert_array_equal(dy_grad1, dy_grad2)
def test_functional_paddle_imperative_dygraph_context(self): def test_functional_paddle_imperative_dygraph_context(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -229,12 +229,12 @@ class TestImperative(unittest.TestCase): ...@@ -229,12 +229,12 @@ class TestImperative(unittest.TestCase):
egr_tmp5 = fluid.core.eager.Tensor(value=x) egr_tmp5 = fluid.core.eager.Tensor(value=x)
egr_tmp6 = fluid.core.eager.Tensor(t) egr_tmp6 = fluid.core.eager.Tensor(t)
self.assertTrue(np.array_equal(x, egr_tmp.numpy())) np.testing.assert_array_equal(x, egr_tmp.numpy())
self.assertTrue(np.array_equal(y, egr_tmp2.numpy())) np.testing.assert_array_equal(y, egr_tmp2.numpy())
self.assertTrue(np.array_equal(x, egr_tmp3.numpy())) np.testing.assert_array_equal(x, egr_tmp3.numpy())
self.assertTrue(np.array_equal(y, egr_tmp4.numpy())) np.testing.assert_array_equal(y, egr_tmp4.numpy())
self.assertTrue(np.array_equal(x, egr_tmp5.numpy())) np.testing.assert_array_equal(x, egr_tmp5.numpy())
self.assertTrue(np.array_equal(x, egr_tmp6.numpy())) np.testing.assert_array_equal(x, egr_tmp6.numpy())
else: else:
tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace()) tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace()) tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
...@@ -243,12 +243,12 @@ class TestImperative(unittest.TestCase): ...@@ -243,12 +243,12 @@ class TestImperative(unittest.TestCase):
tmp5 = fluid.core.VarBase(value=x) tmp5 = fluid.core.VarBase(value=x)
tmp6 = fluid.core.VarBase(t) tmp6 = fluid.core.VarBase(t)
self.assertTrue(np.array_equal(x, tmp.numpy())) np.testing.assert_array_equal(x, tmp.numpy())
self.assertTrue(np.array_equal(y, tmp2.numpy())) np.testing.assert_array_equal(y, tmp2.numpy())
self.assertTrue(np.array_equal(x, tmp3.numpy())) np.testing.assert_array_equal(x, tmp3.numpy())
self.assertTrue(np.array_equal(y, tmp4.numpy())) np.testing.assert_array_equal(y, tmp4.numpy())
self.assertTrue(np.array_equal(x, tmp5.numpy())) np.testing.assert_array_equal(x, tmp5.numpy())
self.assertTrue(np.array_equal(x, tmp6.numpy())) np.testing.assert_array_equal(x, tmp6.numpy())
def test_create_varbase(self): def test_create_varbase(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
...@@ -479,10 +479,10 @@ class TestImperative(unittest.TestCase): ...@@ -479,10 +479,10 @@ class TestImperative(unittest.TestCase):
feed={inp.name: np_inp}, feed={inp.name: np_inp},
fetch_list=[x.name, param_grads[1].name]) fetch_list=[x.name, param_grads[1].name])
self.assertTrue(np.array_equal(dy_out, static_out)) np.testing.assert_array_equal(dy_out, static_out)
self.assertTrue(np.array_equal(dy_grad, static_grad)) np.testing.assert_array_equal(dy_grad, static_grad)
self.assertTrue(np.array_equal(dy_out2, static_out)) np.testing.assert_array_equal(dy_out2, static_out)
self.assertTrue(np.array_equal(dy_grad2, static_grad)) np.testing.assert_array_equal(dy_grad2, static_grad)
def test_layer_in_out(self): def test_layer_in_out(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
...@@ -577,9 +577,9 @@ class TestImperative(unittest.TestCase): ...@@ -577,9 +577,9 @@ class TestImperative(unittest.TestCase):
loss2 = x * z loss2 = x * z
loss1.backward(retain_graph=True) loss1.backward(retain_graph=True)
loss2.backward(retain_graph=True) loss2.backward(retain_graph=True)
self.assertTrue(np.array_equal(x.grad.numpy(), [23.])) np.testing.assert_array_equal(x.grad.numpy(), [23.0])
self.assertTrue(np.array_equal(y.grad.numpy(), [25.])) np.testing.assert_array_equal(y.grad.numpy(), [25.0])
self.assertTrue(np.array_equal(z.grad.numpy(), [5.])) np.testing.assert_array_equal(z.grad.numpy(), [5.0])
x.clear_grad() x.clear_grad()
y.clear_grad() y.clear_grad()
z.clear_grad() z.clear_grad()
...@@ -592,13 +592,13 @@ class TestImperative(unittest.TestCase): ...@@ -592,13 +592,13 @@ class TestImperative(unittest.TestCase):
loss = fun(x, y, z) loss = fun(x, y, z)
loss.backward(retain_graph=True) loss.backward(retain_graph=True)
# x.grad = 2*x*y + z + 2*y = 27 # x.grad = 2*x*y + z + 2*y = 27
self.assertTrue(np.array_equal(x.grad.numpy(), [27])) np.testing.assert_array_equal(x.grad.numpy(), [27])
loss.backward(retain_graph=True) loss.backward(retain_graph=True)
self.assertTrue(np.array_equal(x.grad.numpy(), [54])) np.testing.assert_array_equal(x.grad.numpy(), [54])
loss.backward() loss.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), [81])) np.testing.assert_array_equal(x.grad.numpy(), [81])
with self.assertRaises(RuntimeError): with self.assertRaises(RuntimeError):
loss.backward() loss.backward()
...@@ -608,8 +608,8 @@ class TestImperative(unittest.TestCase): ...@@ -608,8 +608,8 @@ class TestImperative(unittest.TestCase):
dx = paddle.grad([loss1], x, create_graph=True)[0] dx = paddle.grad([loss1], x, create_graph=True)[0]
loss = loss1 + loss2 + dx loss = loss1 + loss2 + dx
loss.backward() loss.backward()
self.assertTrue(np.array_equal(dx.grad.numpy(), [1])) np.testing.assert_array_equal(dx.grad.numpy(), [1])
self.assertTrue(np.array_equal(x.grad.numpy(), [108])) np.testing.assert_array_equal(x.grad.numpy(), [108])
def test_mlp(sort_sum_gradient): def test_mlp(sort_sum_gradient):
fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient}) fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
...@@ -641,7 +641,7 @@ class TestImperative(unittest.TestCase): ...@@ -641,7 +641,7 @@ class TestImperative(unittest.TestCase):
loss = mlp1(x) loss = mlp1(x)
loss.backward() loss.backward()
self.assertTrue(np.array_equal(loss.grad.numpy(), [1])) np.testing.assert_array_equal(loss.grad.numpy(), [1])
self.assertTrue( self.assertTrue(
np.allclose(mlp1._linear1.weight.grad.numpy(), np.allclose(mlp1._linear1.weight.grad.numpy(),
expected_weight1_grad)) expected_weight1_grad))
...@@ -656,7 +656,7 @@ class TestImperative(unittest.TestCase): ...@@ -656,7 +656,7 @@ class TestImperative(unittest.TestCase):
expected_bias2_grad)) expected_bias2_grad))
mlp2.clear_gradients() mlp2.clear_gradients()
self.assertTrue(np.array_equal(clear_loss.grad.numpy(), [1])) np.testing.assert_array_equal(clear_loss.grad.numpy(), [1])
if ((batch_id + 1) % 10) % 2 == 0: if ((batch_id + 1) % 10) % 2 == 0:
mlp1.clear_gradients() mlp1.clear_gradients()
expected_weight1_grad = 0. expected_weight1_grad = 0.
...@@ -785,14 +785,14 @@ class TestImperative(unittest.TestCase): ...@@ -785,14 +785,14 @@ class TestImperative(unittest.TestCase):
param_grads[1][1].name, param_grads[2][1].name param_grads[1][1].name, param_grads[2][1].name
]) ])
self.assertTrue(np.array_equal(dy_out, static_out)) np.testing.assert_array_equal(dy_out, static_out)
self.assertTrue(np.array_equal(dy_grad_h2o, static_grad_h2o)) np.testing.assert_array_equal(dy_grad_h2o, static_grad_h2o)
self.assertTrue(np.array_equal(dy_grad_h2h, static_grad_h2h)) np.testing.assert_array_equal(dy_grad_h2h, static_grad_h2h)
self.assertTrue(np.array_equal(dy_grad_i2h, static_grad_i2h)) np.testing.assert_array_equal(dy_grad_i2h, static_grad_i2h)
self.assertTrue(np.array_equal(dy_out2, static_out)) np.testing.assert_array_equal(dy_out2, static_out)
self.assertTrue(np.array_equal(dy_grad_h2o2, static_grad_h2o)) np.testing.assert_array_equal(dy_grad_h2o2, static_grad_h2o)
self.assertTrue(np.array_equal(dy_grad_h2h2, static_grad_h2h)) np.testing.assert_array_equal(dy_grad_h2h2, static_grad_h2h)
self.assertTrue(np.array_equal(dy_grad_i2h2, static_grad_i2h)) np.testing.assert_array_equal(dy_grad_i2h2, static_grad_i2h)
def test_rnn(self): def test_rnn(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -846,7 +846,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -846,7 +846,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a, act="hard_sigmoid") res1 = func(a, act="hard_sigmoid")
res2 = fluid.layers.hard_sigmoid(a) res2 = fluid.layers.hard_sigmoid(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph1(self): def test_append_activation_in_dygraph1(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -875,7 +875,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -875,7 +875,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a, act="sigmoid", use_cudnn=True) res1 = func(a, act="sigmoid", use_cudnn=True)
res2 = fluid.layers.sigmoid(a) res2 = fluid.layers.sigmoid(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph3(self): def test_append_activation_in_dygraph3(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -892,7 +892,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -892,7 +892,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a) res1 = func(a)
res2 = fluid.layers.relu(a) res2 = fluid.layers.relu(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph_use_mkldnn(self): def test_append_activation_in_dygraph_use_mkldnn(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -911,7 +911,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -911,7 +911,7 @@ class TestDygraphUtils(unittest.TestCase):
finally: finally:
fluid.set_flags({'FLAGS_use_mkldnn': False}) fluid.set_flags({'FLAGS_use_mkldnn': False})
res2 = fluid.layers.relu(a) res2 = fluid.layers.relu(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph_global_use_mkldnn(self): def test_append_activation_in_dygraph_global_use_mkldnn(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -937,7 +937,7 @@ class TestDygraphUtils(unittest.TestCase): ...@@ -937,7 +937,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np) a = paddle.to_tensor(a_np)
res1 = func(a, bias=a) res1 = func(a, bias=a)
res2 = paddle.add(a, a) res2 = paddle.add(a, a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_bias_in_dygraph(self): def test_append_bias_in_dygraph(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -58,8 +58,8 @@ class TestDataParallelStateDict(unittest.TestCase): ...@@ -58,8 +58,8 @@ class TestDataParallelStateDict(unittest.TestCase):
for k, v in single_state.items(): for k, v in single_state.items():
self.assertTrue(k in parallel_state) self.assertTrue(k in parallel_state)
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), parallel_state[k].numpy())) parallel_state[k].numpy())
base_para[k] = v.numpy() base_para[k] = v.numpy()
...@@ -75,7 +75,7 @@ class TestDataParallelStateDict(unittest.TestCase): ...@@ -75,7 +75,7 @@ class TestDataParallelStateDict(unittest.TestCase):
parallel_state = parallel_mlp.state_dict() parallel_state = parallel_mlp.state_dict()
for k, v in parallel_state.items(): for k, v in parallel_state.items():
self.assertTrue(np.array_equal(v.numpy(), base_para[k])) np.testing.assert_array_equal(v.numpy(), base_para[k])
parallel_mlp.load_dict(base_para) parallel_mlp.load_dict(base_para)
......
...@@ -186,9 +186,8 @@ class TestEagerGrad(TestCase): ...@@ -186,9 +186,8 @@ class TestEagerGrad(TestCase):
out4 = paddle.mean(out3) out4 = paddle.mean(out3)
egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3]) egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3])
self.assertTrue( np.testing.assert_array_equal(dout2_record_by_hook[0].numpy(),
np.array_equal(dout2_record_by_hook[0].numpy(), np.array([1.0, 2.0]))
np.array([1., 2.])))
x1 = paddle.to_tensor([1.0, 2.0]) x1 = paddle.to_tensor([1.0, 2.0])
x1.stop_gradient = False x1.stop_gradient = False
...@@ -203,8 +202,8 @@ class TestEagerGrad(TestCase): ...@@ -203,8 +202,8 @@ class TestEagerGrad(TestCase):
self.assertEqual(dout2.stop_gradient, egr_dout2.stop_gradient) self.assertEqual(dout2.stop_gradient, egr_dout2.stop_gradient)
self.assertEqual(dout3.stop_gradient, egr_dout3.stop_gradient) self.assertEqual(dout3.stop_gradient, egr_dout3.stop_gradient)
self.assertTrue(np.array_equal(dout2.numpy(), egr_dout2.numpy())) np.testing.assert_array_equal(dout2.numpy(), egr_dout2.numpy())
self.assertTrue(np.array_equal(dout3.numpy(), egr_dout3.numpy())) np.testing.assert_array_equal(dout3.numpy(), egr_dout3.numpy())
class TestDygraphDoubleGrad(TestCase): class TestDygraphDoubleGrad(TestCase):
...@@ -392,15 +391,13 @@ class TestDygraphDoubleGrad(TestCase): ...@@ -392,15 +391,13 @@ class TestDygraphDoubleGrad(TestCase):
if grad_y is not None: if grad_y is not None:
self.assertTrue(grad_y.stop_gradient) self.assertTrue(grad_y.stop_gradient)
self.assertTrue( np.testing.assert_array_equal(grad_y.numpy(),
np.array_equal(grad_y.numpy(), original_random_grad_y)
original_random_grad_y))
if grad_z is not None: if grad_z is not None:
self.assertTrue(grad_z.stop_gradient) self.assertTrue(grad_z.stop_gradient)
self.assertTrue( np.testing.assert_array_equal(grad_z.numpy(),
np.array_equal(grad_z.numpy(), original_random_grad_z)
original_random_grad_z))
def test_none_one_initial_gradient(self): def test_none_one_initial_gradient(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -583,7 +580,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase): ...@@ -583,7 +580,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
grad_2 = a.gradient() grad_2 = a.gradient()
self.assertTrue(np.array_equal(grad_1, grad_2)) np.testing.assert_array_equal(grad_1, grad_2)
def test_compare(self): def test_compare(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -647,8 +644,8 @@ class TestDoubleGradResNet(TestCase): ...@@ -647,8 +644,8 @@ class TestDoubleGradResNet(TestCase):
g_numpy = g.numpy() g_numpy = g.numpy()
self.assertEqual(list(g_numpy.shape), list(out.shape)) self.assertEqual(list(g_numpy.shape), list(out.shape))
self.assertTrue(np.array_equal(egr_out, out)) np.testing.assert_array_equal(egr_out, out)
self.assertTrue(np.array_equal(egr_g_numpy, g_numpy)) np.testing.assert_array_equal(egr_g_numpy, g_numpy)
@dygraph_guard @dygraph_guard
def test_resnet_resnet101(self): def test_resnet_resnet101(self):
...@@ -679,8 +676,8 @@ class TestDoubleGradResNet(TestCase): ...@@ -679,8 +676,8 @@ class TestDoubleGradResNet(TestCase):
g_numpy = g.numpy() g_numpy = g.numpy()
self.assertEqual(list(g_numpy.shape), list(out.shape)) self.assertEqual(list(g_numpy.shape), list(out.shape))
self.assertTrue(np.array_equal(egr_out, out)) np.testing.assert_array_equal(egr_out, out)
self.assertTrue(np.array_equal(egr_g_numpy, g_numpy)) np.testing.assert_array_equal(egr_g_numpy, g_numpy)
class TestDoubleGradBasics(TestCase): class TestDoubleGradBasics(TestCase):
...@@ -705,22 +702,22 @@ class TestDoubleGradBasics(TestCase): ...@@ -705,22 +702,22 @@ class TestDoubleGradBasics(TestCase):
new_x_g.backward() new_x_g.backward()
out_ref = np.ones([3, 3]) * 12.0 out_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(out.numpy(), out_ref)) np.testing.assert_array_equal(out.numpy(), out_ref)
new_x_g_ref = np.ones([3, 3]) * 6.0 new_x_g_ref = np.ones([3, 3]) * 6.0
new_y_g_ref = np.ones([3, 3]) * 6.0 new_y_g_ref = np.ones([3, 3]) * 6.0
self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref)) np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref)) np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
x_grad_ref = np.ones([3, 3]) * 0.0 x_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref)) np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
y_grad_ref = np.ones([3, 3]) * 3.0 y_grad_ref = np.ones([3, 3]) * 3.0
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref)) np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
grad_out_grad_ref = np.ones([3, 3]) * 6.0 grad_out_grad_ref = np.ones([3, 3]) * 6.0
self.assertTrue( np.testing.assert_array_equal(grad_out.grad.numpy(),
np.array_equal(grad_out.grad.numpy(), grad_out_grad_ref)) grad_out_grad_ref)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -95,29 +95,27 @@ class Test_Forward_Hook(unittest.TestCase): ...@@ -95,29 +95,27 @@ class Test_Forward_Hook(unittest.TestCase):
forward_pre_hook_handle1 = simplenet.register_forward_pre_hook( forward_pre_hook_handle1 = simplenet.register_forward_pre_hook(
forward_pre_hook1) forward_pre_hook1)
outs_pre_hook = simplenet(input, y) outs_pre_hook = simplenet(input, y)
self.assertTrue( np.testing.assert_array_equal(outs_pre_hook.numpy(),
np.array_equal(outs_pre_hook.numpy(), outs_origin1.numpy())) outs_origin1.numpy())
# remove forward_pre_hook # remove forward_pre_hook
forward_pre_hook_handle1.remove() forward_pre_hook_handle1.remove()
outs_pre_hook = simplenet(input, y) outs_pre_hook = simplenet(input, y)
self.assertTrue( np.testing.assert_array_equal(outs_pre_hook.numpy(),
np.array_equal(outs_pre_hook.numpy(), outs_origin.numpy())) outs_origin.numpy())
# register forward_post_hook # register forward_post_hook
forward_post_hook_handle1 = simplenet.register_forward_post_hook( forward_post_hook_handle1 = simplenet.register_forward_post_hook(
forward_post_hook1) forward_post_hook1)
outs_forward_hook = simplenet(input, y) outs_forward_hook = simplenet(input, y)
self.assertTrue( np.testing.assert_array_equal(outs_forward_hook.numpy(),
np.array_equal(outs_forward_hook.numpy(), outs_origin.numpy() * 2)
outs_origin.numpy() * 2))
# remove forward_post_hook # remove forward_post_hook
forward_post_hook_handle1.remove() forward_post_hook_handle1.remove()
outs_forward_hook = simplenet(input, y) outs_forward_hook = simplenet(input, y)
self.assertTrue( np.testing.assert_array_equal(outs_forward_hook.numpy(),
np.array_equal(outs_forward_hook.numpy(), outs_origin.numpy())
outs_origin.numpy()))
# test forward_pre_hook and forward_post_hook that don't have a return value # test forward_pre_hook and forward_post_hook that don't have a return value
def func_forward_hook(self): def func_forward_hook(self):
......
...@@ -218,7 +218,7 @@ class TestDygraphLoadStatic(unittest.TestCase): ...@@ -218,7 +218,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
my_test = MyTest() my_test = MyTest()
my_test.set_dict(new_dict, use_structured_name=False) my_test.set_dict(new_dict, use_structured_name=False)
for k, v in my_test.state_dict().items(): for k, v in my_test.state_dict().items():
self.assertTrue(np.array_equal(v.numpy(), new_dict[v.name])) np.testing.assert_array_equal(v.numpy(), new_dict[v.name])
temp_dir.cleanup() temp_dir.cleanup()
......
...@@ -200,10 +200,9 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -200,10 +200,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.allclose(static_loss_value, dy_loss_value, rtol=1e-3)) np.allclose(static_loss_value, dy_loss_value, rtol=1e-3))
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key])) np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated): for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, np.testing.assert_array_equal(value, dy_param_updated[key])
dy_param_updated[key]))
if __name__ == '__main__': if __name__ == '__main__':
......
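Only the exact-equality checks are migrated in the hunk above; the neighboring `np.allclose` assertions are left alone. For completeness, NumPy offers the analogous tolerance-based helper, `np.testing.assert_allclose` (shown here as an aside with made-up values, not as part of this change):

import numpy as np

static_loss_value = np.array([0.5000, 1.0000])
dy_loss_value = np.array([0.5001, 1.0001])  # hypothetical values

# Same tolerance semantics as assertTrue(np.allclose(..., rtol=1e-3)),
# but with a detailed mismatch report when it fails.
np.testing.assert_allclose(dy_loss_value, static_loss_value, rtol=1e-3)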
...@@ -13,10 +13,11 @@ ...@@ -13,10 +13,11 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import warnings
import numpy as np import numpy as np
import paddle.fluid as fluid import paddle.fluid as fluid
import warnings from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
class TestImperativeNumpyBridge(unittest.TestCase): class TestImperativeNumpyBridge(unittest.TestCase):
...@@ -31,14 +32,14 @@ class TestImperativeNumpyBridge(unittest.TestCase): ...@@ -31,14 +32,14 @@ class TestImperativeNumpyBridge(unittest.TestCase):
w[-1].message) w[-1].message)
# Temporarily disable zero_copy # Temporarily disable zero_copy
# var = fluid.dygraph.to_variable(data_np, zero_copy=True) # var = fluid.dygraph.to_variable(data_np, zero_copy=True)
# self.assertTrue(np.array_equal(var.numpy(), data_np)) # np.testing.assert_array_equal(var.numpy(), data_np)
# data_np[0][0] = 4 # data_np[0][0] = 4
# self.assertEqual(data_np[0][0], 4) # self.assertEqual(data_np[0][0], 4)
# self.assertEqual(var[0][0].numpy()[0], 4) # self.assertEqual(var[0][0].numpy()[0], 4)
# self.assertTrue(np.array_equal(var.numpy(), data_np)) # np.testing.assert_array_equal(var.numpy(), data_np)
var2 = fluid.dygraph.to_variable(data_np, zero_copy=False) var2 = fluid.dygraph.to_variable(data_np, zero_copy=False)
self.assertTrue(np.array_equal(var2.numpy(), data_np)) np.testing.assert_array_equal(var2.numpy(), data_np)
data_np[0][0] = -1 data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1) self.assertEqual(data_np[0][0], -1)
if not _in_legacy_dygraph(): if not _in_legacy_dygraph():
......
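The numpy-bridge test above mutates `data_np` after `to_variable(..., zero_copy=False)` to prove the tensor owns its own buffer. The same copy-versus-view distinction can be sketched in plain NumPy (an aside, not Paddle API):

import numpy as np

data = np.array([[2, 3], [4, 5]], dtype=np.float32)

copied = np.array(data)  # independent buffer, like zero_copy=False
viewed = data.view()     # shared buffer, like the disabled zero_copy=True path

data[0][0] = -1
print(copied[0][0])  # 2.0 -- unaffected by the mutation
print(viewed[0][0])  # -1.0 -- sees the change through the shared buffer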
...@@ -573,7 +573,7 @@ class TestDygraphOCRAttention(unittest.TestCase): ...@@ -573,7 +573,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
self.assertTrue(np.allclose(static_out, dy_out)) self.assertTrue(np.allclose(static_out, dy_out))
for key, value in six.iteritems(static_param_init_value): for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.array_equal(value, dy_param_init_value[key])) np.testing.assert_array_equal(value, dy_param_init_value[key])
for key, value in six.iteritems(static_param_value): for key, value in six.iteritems(static_param_value):
self.assertTrue(np.allclose(value, dy_param_value[key], rtol=1e-05)) self.assertTrue(np.allclose(value, dy_param_value[key], rtol=1e-05))
...@@ -582,7 +582,7 @@ class TestDygraphOCRAttention(unittest.TestCase): ...@@ -582,7 +582,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
self.assertTrue(np.allclose(static_out, eager_out)) self.assertTrue(np.allclose(static_out, eager_out))
for key, value in six.iteritems(static_param_init_value): for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.array_equal(value, eager_param_init_value[key])) np.testing.assert_array_equal(value, eager_param_init_value[key])
for key, value in six.iteritems(static_param_value): for key, value in six.iteritems(static_param_value):
self.assertTrue( self.assertTrue(
......
...@@ -376,15 +376,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -376,15 +376,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
static_param_updated[static_param_name_list[k - static_param_updated[static_param_name_list[k -
3]] = out[k] 3]] = out[k]
self.assertTrue(np.array_equal(static_loss_value, dy_loss_value)) np.testing.assert_array_equal(static_loss_value, dy_loss_value)
self.assertTrue( np.testing.assert_array_equal(static_last_cell_value,
np.array_equal(static_last_cell_value, dy_last_cell_value)) dy_last_cell_value)
self.assertTrue( np.testing.assert_array_equal(static_last_hidden_value,
np.array_equal(static_last_hidden_value, dy_last_hidden_value)) dy_last_hidden_value)
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key])) np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated): for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key])) np.testing.assert_array_equal(value, dy_param_updated[key])
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -164,15 +164,15 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): ...@@ -164,15 +164,15 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
static_param_updated[static_param_name_list[k - static_param_updated[static_param_name_list[k -
3]] = out[k] 3]] = out[k]
self.assertTrue(np.array_equal(static_loss_value, dy_loss_value)) np.testing.assert_array_equal(static_loss_value, dy_loss_value)
self.assertTrue( np.testing.assert_array_equal(static_last_cell_value,
np.array_equal(static_last_cell_value, dy_last_cell_value)) dy_last_cell_value)
self.assertTrue( np.testing.assert_array_equal(static_last_hidden_value,
np.array_equal(static_last_hidden_value, dy_last_hidden_value)) dy_last_hidden_value)
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key])) np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated): for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key])) np.testing.assert_array_equal(value, dy_param_updated[key])
def test_ptb_rnn_sort_gradient(self): def test_ptb_rnn_sort_gradient(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -117,10 +117,10 @@ class TestRecurrentFeed(unittest.TestCase): ...@@ -117,10 +117,10 @@ class TestRecurrentFeed(unittest.TestCase):
static_dout = out[2] static_dout = out[2]
original_np1 = static_out_value original_np1 = static_out_value
self.assertTrue(np.array_equal(static_sum_out, sum_out_value)) np.testing.assert_array_equal(static_sum_out, sum_out_value)
self.assertTrue(np.array_equal(static_sum_out, eager_sum_out_value)) np.testing.assert_array_equal(static_sum_out, eager_sum_out_value)
self.assertTrue(np.array_equal(static_dout, dyout)) np.testing.assert_array_equal(static_dout, dyout)
self.assertTrue(np.array_equal(static_dout, eager_dyout)) np.testing.assert_array_equal(static_dout, eager_dyout)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -387,8 +387,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -387,8 +387,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -409,7 +409,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -409,7 +409,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariable(self): def func_testSetVariable(self):
seed = 90 seed = 90
...@@ -492,8 +492,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -492,8 +492,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -514,7 +514,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -514,7 +514,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpy(self): def func_testSetNumpy(self):
seed = 90 seed = 90
...@@ -601,8 +601,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -601,8 +601,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -625,7 +625,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -625,7 +625,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariableBeforeTrain(self): def func_testSetVariableBeforeTrain(self):
seed = 90 seed = 90
...@@ -682,17 +682,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -682,17 +682,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "global_step": if k == "global_step":
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
state_dict = ptb_model.state_dict() state_dict = ptb_model.state_dict()
...@@ -700,7 +698,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -700,7 +698,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testLoadAndSetVarBaseBeforeTrain(self): def func_testLoadAndSetVarBaseBeforeTrain(self):
seed = 90 seed = 90
...@@ -769,17 +767,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -769,17 +767,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "global_step": if k == "global_step":
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
# check parameter # check parameter
...@@ -789,7 +785,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -789,7 +785,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpyBeforeTrain(self): def func_testSetNumpyBeforeTrain(self):
seed = 90 seed = 90
...@@ -870,17 +866,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -870,17 +866,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "global_step": if k == "global_step":
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
# check parameter # check parameter
...@@ -890,7 +884,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -890,7 +884,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testOnlyLoadParams(self): def func_testOnlyLoadParams(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
......
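The hunks above repeat one pattern: tensor-valued entries of the optimizer state_dict move to np.testing.assert_array_equal, while scalar entries stay on self.assertEqual. A minimal, self-contained sketch of that pattern follows; FakeTensor is a hypothetical stand-in for core.VarBase / core.eager.Tensor, not Paddle API.

import unittest

import numpy as np


class FakeTensor:
    # Hypothetical stand-in: mimics only the two members
    # (.name and .numpy()) that the comparison loop touches.

    def __init__(self, name, value):
        self.name = name
        self._value = np.asarray(value)

    def numpy(self):
        return self._value


class TestStateDictComparePattern(unittest.TestCase):

    def test_compare(self):
        base_opti = {'moment1_0': np.ones([3]), 'global_step': 10}
        opti_dict = {
            'moment1_0': FakeTensor('moment1_0', np.ones([3])),
            'global_step': 10,
        }
        for k, v in opti_dict.items():
            if isinstance(v, FakeTensor):
                # Array entry: failure yields an element-wise mismatch report.
                np.testing.assert_array_equal(v.numpy(), base_opti[v.name])
            else:
                # Scalar entry: unittest's assertEqual is still the right tool.
                self.assertEqual(v, base_opti[k])


if __name__ == '__main__':
    unittest.main()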
...@@ -401,8 +401,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -401,8 +401,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -423,7 +423,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -423,7 +423,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariable(self): def func_testSetVariable(self):
seed = 90 seed = 90
...@@ -508,8 +508,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -508,8 +508,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -530,7 +530,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -530,7 +530,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpy(self): def func_testSetNumpy(self):
seed = 90 seed = 90
...@@ -619,8 +619,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -619,8 +619,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)): if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name])) self.base_opti[v.name])
else: else:
self.assertEqual(v, self.base_opti[k]) self.assertEqual(v, self.base_opti[k])
...@@ -643,7 +643,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -643,7 +643,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariableBeforeTrain(self): def func_testSetVariableBeforeTrain(self):
seed = 90 seed = 90
...@@ -702,17 +702,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -702,17 +702,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "global_step": if k == "global_step":
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
state_dict = ptb_model.state_dict() state_dict = ptb_model.state_dict()
...@@ -720,7 +718,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -720,7 +718,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testLoadAndSetVarBaseBeforeTrain(self): def func_testLoadAndSetVarBaseBeforeTrain(self):
seed = 90 seed = 90
...@@ -790,17 +788,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -790,17 +788,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "global_step": if k == "global_step":
self.assertTrue( np.testing.assert_array_equal(v.numpy(),
np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
# check parameter # check parameter
...@@ -810,7 +806,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -810,7 +806,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpyBeforeTrain(self): def func_testSetNumpyBeforeTrain(self):
seed = 90 seed = 90
...@@ -892,18 +888,15 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -892,18 +888,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict() opti_dict = adam.state_dict()
for k, v in opti_dict.items(): for k, v in opti_dict.items():
if k == "LR_Scheduler": if k == "LR_Scheduler":
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v['last_epoch'], v['last_epoch'], self.base_opti[k]['last_epoch'] + 1)
self.base_opti[k]['last_epoch'] + 1))
if k.find("beta1_pow_acc_0") > 0: if k.find("beta1_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta1)
self.base_opti[v.name] * adam._beta1))
if k.find("beta2_pow_acc_0") > 0: if k.find("beta2_pow_acc_0") > 0:
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(v.numpy(), v.numpy(), self.base_opti[v.name] * adam._beta2)
self.base_opti[v.name] * adam._beta2))
# check parameter # check parameter
...@@ -913,7 +906,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -913,7 +906,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy() new_t = v.numpy()
base_t = self.model_base[k] base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t)) np.testing.assert_array_equal(new_t, base_t)
def func_testOnlyLoadParams(self): def func_testOnlyLoadParams(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
......
...@@ -203,13 +203,11 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -203,13 +203,11 @@ class TestDygraphSimpleNet(unittest.TestCase):
static_param_updated[static_param_name_list[ static_param_updated[static_param_name_list[
k - 1]] = out[k] k - 1]] = out[k]
self.assertTrue(np.array_equal(static_loss_value, np.testing.assert_array_equal(static_loss_value, dy_loss_value)
dy_loss_value))
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key])) np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated): for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, np.testing.assert_array_equal(value, dy_param_updated[key])
dy_param_updated[key]))
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -313,11 +313,11 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): ...@@ -313,11 +313,11 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_train_static() self.load_and_train_static()
# Phase 3. compare # Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data)) np.testing.assert_array_equal(static_x_data, dy_x_data)
for key, value in six.iteritems(static_param_init_value): for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key] key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key])) np.testing.assert_array_equal(value, dy_param_init_value[key])
# np.testing.assert_array_almost_equal(static_out, dy_out) # np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
...@@ -341,10 +341,10 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): ...@@ -341,10 +341,10 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_train_static() self.load_and_train_static()
# Phase 3. compare # Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data)) np.testing.assert_array_equal(static_x_data, dy_x_data)
for key, value in six.iteritems(static_param_init_value): for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key] key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key])) np.testing.assert_array_equal(value, dy_param_init_value[key])
# np.testing.assert_array_almost_equal(static_out, dy_out) # np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
...@@ -368,7 +368,7 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): ...@@ -368,7 +368,7 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_infer_static() self.load_and_infer_static()
# Phase 3. compare # Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data)) np.testing.assert_array_equal(static_x_data, dy_x_data)
np.testing.assert_array_almost_equal(static_out, dy_out) np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
......
...@@ -232,7 +232,7 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): ...@@ -232,7 +232,7 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase):
static_param_init_value.keys()) static_param_init_value.keys())
for key, value in six.iteritems(static_param_init_value): for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key] key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key])) np.testing.assert_array_equal(value, dy_param_init_value[key])
self.assertTrue(np.allclose(static_out, dy_out)) self.assertTrue(np.allclose(static_out, dy_out))
......
...@@ -62,7 +62,7 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): ...@@ -62,7 +62,7 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
dygraph_out = layer(in_x) dygraph_out = layer(in_x)
dygraph_out_numpy = dygraph_out.numpy() dygraph_out_numpy = dygraph_out.numpy()
static_out = traced_layer([in_x])[0] static_out = traced_layer([in_x])[0]
self.assertTrue(np.array_equal(dygraph_out_numpy, static_out)) np.testing.assert_array_equal(dygraph_out_numpy, static_out)
loss = fluid.layers.reduce_mean(dygraph_out) loss = fluid.layers.reduce_mean(dygraph_out)
loss.backward() loss.backward()
......
...@@ -1133,19 +1133,19 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): ...@@ -1133,19 +1133,19 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
static_param_updated[static_param_name_list[k - static_param_updated[static_param_name_list[k -
4]] = out[k] 4]] = out[k]
if _in_legacy_dygraph(): if _in_legacy_dygraph():
self.assertTrue( np.testing.assert_array_equal(static_avg_cost_value,
np.array_equal(static_avg_cost_value, dy_avg_cost_value)) dy_avg_cost_value)
self.assertTrue( np.testing.assert_array_equal(static_sum_cost_value,
np.array_equal(static_sum_cost_value, dy_sum_cost_value)) dy_sum_cost_value)
self.assertTrue( np.testing.assert_array_equal(static_predict_value,
np.array_equal(static_predict_value, dy_predict_value)) dy_predict_value)
self.assertTrue( np.testing.assert_array_equal(static_token_num_value,
np.array_equal(static_token_num_value, dy_token_num_value)) dy_token_num_value)
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key])) np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated): for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key])) np.testing.assert_array_equal(value, dy_param_updated[key])
# compare eager result with imperative result # compare eager result with imperative result
with guard(): with guard():
...@@ -1164,7 +1164,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): ...@@ -1164,7 +1164,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
self.assertTrue(np.allclose(dy_token_num_value, eager_token_num_value)) self.assertTrue(np.allclose(dy_token_num_value, eager_token_num_value))
for key, value in six.iteritems(static_param_init): for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, eager_param_init[key])) np.testing.assert_array_equal(value, eager_param_init[key])
for key, value in six.iteritems(dy_param_updated): for key, value in six.iteritems(dy_param_updated):
self.assertTrue(np.allclose(value, eager_param_updated[key])) self.assertTrue(np.allclose(value, eager_param_updated[key]))
......
...@@ -78,37 +78,34 @@ class TestDygraphTripleGradMatmul(TestCase): ...@@ -78,37 +78,34 @@ class TestDygraphTripleGradMatmul(TestCase):
new_a.backward() new_a.backward()
out_ref = np.ones([3, 3]) * 12.0 out_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(out.numpy(), out_ref)) np.testing.assert_array_equal(out.numpy(), out_ref)
new_x_g_ref = np.ones([3, 3]) * 6.0 new_x_g_ref = np.ones([3, 3]) * 6.0
new_y_g_ref = np.ones([3, 3]) * 6.0 new_y_g_ref = np.ones([3, 3]) * 6.0
self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref)) np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref)) np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
new_a_ref = np.ones([3, 3]) * 3.0 new_a_ref = np.ones([3, 3]) * 3.0
new_b_ref = np.ones([3, 3]) * 3.0 new_b_ref = np.ones([3, 3]) * 3.0
new_c_ref = np.ones([3, 3]) * 12.0 new_c_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(new_a.numpy(), new_a_ref)) np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
self.assertTrue(np.array_equal(new_b.numpy(), new_b_ref)) np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
self.assertTrue(np.array_equal(new_c.numpy(), new_c_ref)) np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
x_grad_ref = np.ones([3, 3]) * 0.0 x_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref)) np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
y_grad_ref = np.ones([3, 3]) * 0.0 y_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref)) np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
new_out_g_ref = np.ones([3, 3]) * 3.0 new_out_g_ref = np.ones([3, 3]) * 3.0
self.assertTrue( np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
np.array_equal(new_out_g.grad.numpy(), new_out_g_ref))
new_x_g_g_ref = np.ones([3, 3]) * 0.0 new_x_g_g_ref = np.ones([3, 3]) * 0.0
new_y_g_g_ref = np.ones([3, 3]) * 3.0 new_y_g_g_ref = np.ones([3, 3]) * 3.0
self.assertTrue( np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
np.array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)) np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
self.assertTrue(
np.array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref))
class TestDygraphTripleGrad(TestCase): class TestDygraphTripleGrad(TestCase):
......
...@@ -25,7 +25,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase): ...@@ -25,7 +25,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
def run_main(self, np_arr, place): def run_main(self, np_arr, place):
with guard(place): with guard(place):
var = to_variable(np_arr) var = to_variable(np_arr)
self.assertTrue(np.array_equal(np_arr, var.numpy())) np.testing.assert_array_equal(np_arr, var.numpy())
def func_non_zero_gpu(self): def func_non_zero_gpu(self):
if not fluid.is_compiled_with_cuda(): if not fluid.is_compiled_with_cuda():
......
...@@ -795,8 +795,8 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase): ...@@ -795,8 +795,8 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
dynamic_res = run_dynamic_graph() dynamic_res = run_dynamic_graph()
static_res = run_static_graph() static_res = run_static_graph()
self.assertTrue(np.array_equal(dynamic_res[0], static_res[0])) np.testing.assert_array_equal(dynamic_res[0], static_res[0])
self.assertTrue(np.array_equal(dynamic_res[1], static_res[1])) np.testing.assert_array_equal(dynamic_res[1], static_res[1])
def test_order(self): def test_order(self):
with framework._test_eager_guard(): with framework._test_eager_guard():
...@@ -819,7 +819,7 @@ class TestOrthogonalInitializer1(unittest.TestCase): ...@@ -819,7 +819,7 @@ class TestOrthogonalInitializer1(unittest.TestCase):
self.num_ops = 9 self.num_ops = 9
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10))) self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
def func_orthogonal(self): def func_orthogonal(self):
...@@ -878,7 +878,7 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1): ...@@ -878,7 +878,7 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
self.num_ops = 8 self.num_ops = 8
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10))) self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10)))
...@@ -897,7 +897,7 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1): ...@@ -897,7 +897,7 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1):
self.num_ops = 8 self.num_ops = 8
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6))
...@@ -922,7 +922,7 @@ class TestOrthogonalInitializer4(unittest.TestCase): ...@@ -922,7 +922,7 @@ class TestOrthogonalInitializer4(unittest.TestCase):
self.kernel_size = (3, 3) self.kernel_size = (3, 3)
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
a = a.reshape(6, -1) a = a.reshape(6, -1)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6))) self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
...@@ -973,7 +973,7 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4): ...@@ -973,7 +973,7 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
self.kernel_size = (3, 3) self.kernel_size = (3, 3)
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
a = a.reshape(50, -1) a = a.reshape(50, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36))) self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36)))
...@@ -993,7 +993,7 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4): ...@@ -993,7 +993,7 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4):
self.kernel_size = (3, 3) self.kernel_size = (3, 3)
def check_result(self, a, b): def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b)) np.testing.assert_array_equal(a, b)
a = a.reshape(36, -1) a = a.reshape(36, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6))
...@@ -1014,8 +1014,8 @@ class TestDiracInitializer1(unittest.TestCase): ...@@ -1014,8 +1014,8 @@ class TestDiracInitializer1(unittest.TestCase):
self.num_ops = 8 #fill_constant*2, reshape*2, assign_value*2, scatter, cast self.num_ops = 8 #fill_constant*2, reshape*2, assign_value*2, scatter, cast
def check_result(self, w_dygraph, w_static, conv_in, conv_out): def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static)) np.testing.assert_array_equal(w_dygraph, w_static)
self.assertTrue(np.array_equal(conv_out, conv_in[:, 0:2, 1:9])) np.testing.assert_array_equal(conv_out, conv_in[:, 0:2, 1:9])
def func_dirac(self): def func_dirac(self):
self.config() self.config()
...@@ -1079,11 +1079,11 @@ class TestDiracInitializer2(TestDiracInitializer1): ...@@ -1079,11 +1079,11 @@ class TestDiracInitializer2(TestDiracInitializer1):
self.num_ops = 8 self.num_ops = 8
def check_result(self, w_dygraph, w_static, conv_in, conv_out): def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static)) np.testing.assert_array_equal(w_dygraph, w_static)
self.assertTrue( np.testing.assert_array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9,
np.array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, 1:9])) 1:9])
self.assertTrue( np.testing.assert_array_equal(conv_out[:, 4:8, :, :],
np.array_equal(conv_out[:, 4:8, :, :], np.zeros([8, 4, 8, 8]))) np.zeros([8, 4, 8, 8]))
# initialize Conv3D weight # initialize Conv3D weight
...@@ -1101,13 +1101,11 @@ class TestDiracInitializer3(TestDiracInitializer1): ...@@ -1101,13 +1101,11 @@ class TestDiracInitializer3(TestDiracInitializer1):
self.num_ops = 7 self.num_ops = 7
def check_result(self, w_dygraph, w_static, conv_in, conv_out): def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static)) np.testing.assert_array_equal(w_dygraph, w_static)
self.assertTrue( np.testing.assert_array_equal(conv_out[:, 0:5, :, :, :],
np.array_equal(conv_out[:, 0:5, :, :, :], conv_in[:, :, 1:9, 1:9, conv_in[:, :, 1:9, 1:9, 1:9])
1:9])) np.testing.assert_array_equal(conv_out[:, 5:10, :, :, :],
self.assertTrue( conv_in[:, :, 1:9, 1:9, 1:9])
np.array_equal(conv_out[:, 5:10, :, :, :], conv_in[:, :, 1:9, 1:9,
1:9]))
def test_error(self): def test_error(self):
self.config() self.config()
......
...@@ -142,7 +142,7 @@ class TestDygraphInplace(unittest.TestCase): ...@@ -142,7 +142,7 @@ class TestDygraphInplace(unittest.TestCase):
self.assertTrue(id(var) == id(inplace_var)) self.assertTrue(id(var) == id(inplace_var))
inplace_var[0] = 2. inplace_var[0] = 2.
self.assertTrue(np.array_equal(var.numpy(), inplace_var.numpy())) np.testing.assert_array_equal(var.numpy(), inplace_var.numpy())
def test_inplace_api(self): def test_inplace_api(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -276,7 +276,7 @@ class TestDygraphInplace(unittest.TestCase): ...@@ -276,7 +276,7 @@ class TestDygraphInplace(unittest.TestCase):
loss.backward() loss.backward()
grad_var_a = var_a.grad.numpy() grad_var_a = var_a.grad.numpy()
self.assertTrue(np.array_equal(grad_var_a_inplace, grad_var_a)) np.testing.assert_array_equal(grad_var_a_inplace, grad_var_a)
def test_backward_success_2(self): def test_backward_success_2(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -506,7 +506,7 @@ class TestLossIsInplaceVar(unittest.TestCase): ...@@ -506,7 +506,7 @@ class TestLossIsInplaceVar(unittest.TestCase):
loss.backward() loss.backward()
grad_var_a = var_a.grad.numpy() grad_var_a = var_a.grad.numpy()
self.assertTrue(np.array_equal(inplace_grad_var_a, grad_var_a)) np.testing.assert_array_equal(inplace_grad_var_a, grad_var_a)
def test_loss_is_inplace_var(self): def test_loss_is_inplace_var(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -108,7 +108,7 @@ class TestInplaceAddto(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestInplaceAddto(unittest.TestCase):
res1, w1 = run_program(True) res1, w1 = run_program(True)
res2, w2 = run_program(False) res2, w2 = run_program(False)
self.assertTrue(np.array_equal(res1, res2)) np.testing.assert_array_equal(res1, res2)
def test_nchw(self): def test_nchw(self):
self.check_result() self.check_result()
......
...@@ -56,7 +56,7 @@ class TestStaticAutoGeneratedAPI(unittest.TestCase): ...@@ -56,7 +56,7 @@ class TestStaticAutoGeneratedAPI(unittest.TestCase):
feed={"x": self.np_x}, feed={"x": self.np_x},
fetch_list=[x, out]) fetch_list=[x, out])
self.assertTrue(np.array_equal(fetch_x, self.np_x)) np.testing.assert_array_equal(fetch_x, self.np_x)
self.assertTrue( self.assertTrue(
self.np_compare(fetch_out, self.executed_numpy_api(self.np_x))) self.np_compare(fetch_out, self.executed_numpy_api(self.np_x)))
......
...@@ -388,9 +388,9 @@ class TestJitSaveLoad(unittest.TestCase): ...@@ -388,9 +388,9 @@ class TestJitSaveLoad(unittest.TestCase):
# inference & compare # inference & compare
x = fluid.dygraph.to_variable( x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32')) np.random.random((1, 784)).astype('float32'))
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(train_layer(x).numpy(), train_layer(x).numpy(),
infer_layer(x).numpy())) infer_layer(x).numpy())
def load_and_finetune(self, train_layer, load_train_layer): def load_and_finetune(self, train_layer, load_train_layer):
train_layer.train() train_layer.train()
...@@ -398,8 +398,8 @@ class TestJitSaveLoad(unittest.TestCase): ...@@ -398,8 +398,8 @@ class TestJitSaveLoad(unittest.TestCase):
# train & compare # train & compare
img0, _, train_loss = train(train_layer) img0, _, train_loss = train(train_layer)
img1, _, load_train_loss = train(load_train_layer) img1, _, load_train_loss = train(load_train_layer)
self.assertTrue( np.testing.assert_array_equal(train_loss.numpy(),
np.array_equal(train_loss.numpy(), load_train_loss.numpy())) load_train_loss.numpy())
def load_dygraph_state_dict(self, train_layer): def load_dygraph_state_dict(self, train_layer):
train_layer.eval() train_layer.eval()
...@@ -414,9 +414,9 @@ class TestJitSaveLoad(unittest.TestCase): ...@@ -414,9 +414,9 @@ class TestJitSaveLoad(unittest.TestCase):
# inference & compare # inference & compare
x = fluid.dygraph.to_variable( x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32')) np.random.random((1, 784)).astype('float32'))
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(train_layer(x).numpy(), train_layer(x).numpy(),
new_layer(x).numpy())) new_layer(x).numpy())
def test_load_dygraph_no_path(self): def test_load_dygraph_no_path(self):
model_path = os.path.join(self.temp_dir.name, model_path = os.path.join(self.temp_dir.name,
...@@ -673,9 +673,9 @@ class TestJitSaveLoadConfig(unittest.TestCase): ...@@ -673,9 +673,9 @@ class TestJitSaveLoadConfig(unittest.TestCase):
infer_layer = paddle.jit.load(model_path) infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable( x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')) np.random.random((4, 8)).astype('float32'))
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(train_layer(x)[0].numpy(), train_layer(x)[0].numpy(),
infer_layer(x).numpy())) infer_layer(x).numpy())
def test_save_no_support_config_error(self): def test_save_no_support_config_error(self):
layer = LinearNet(784, 1) layer = LinearNet(784, 1)
...@@ -778,9 +778,9 @@ class TestJitPruneModelAndLoad(unittest.TestCase): ...@@ -778,9 +778,9 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
x = fluid.dygraph.to_variable( x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')) np.random.random((4, 8)).astype('float32'))
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(train_layer(x)[0].numpy(), train_layer(x)[0].numpy(),
infer_layer(x).numpy())) infer_layer(x).numpy())
def test_load_var_not_in_extra_var_info(self): def test_load_var_not_in_extra_var_info(self):
self.train_and_save() self.train_and_save()
...@@ -831,10 +831,12 @@ class TestJitSaveMultiCases(unittest.TestCase): ...@@ -831,10 +831,12 @@ class TestJitSaveMultiCases(unittest.TestCase):
else: else:
pred = layer(x).numpy() pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy() loaded_pred = loaded_layer(x).numpy()
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(pred, loaded_pred), pred,
msg="Result diff when load and inference:\nlayer result:\n{}\n" \ loaded_pred,
"loaded layer result:\n{}".format(pred, loaded_pred)) err_msg=
'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'
.format(pred, loaded_pred))
def test_no_prune_to_static_after_train(self): def test_no_prune_to_static_after_train(self):
layer = LinearNet(784, 1) layer = LinearNet(784, 1)
...@@ -1056,7 +1058,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase): ...@@ -1056,7 +1058,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
paddle.jit.save(layer, self.model_path) paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path) load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x) load_out = load_layer(x)
self.assertTrue(np.array_equal(out, load_out)) np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadNoParamLayer(unittest.TestCase): class TestJitSaveLoadNoParamLayer(unittest.TestCase):
...@@ -1079,7 +1081,7 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase): ...@@ -1079,7 +1081,7 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
paddle.jit.save(layer, self.model_path) paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path) load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x, y) load_out = load_layer(x, y)
self.assertTrue(np.array_equal(out, load_out)) np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadMultiMethods(unittest.TestCase): class TestJitSaveLoadMultiMethods(unittest.TestCase):
...@@ -1506,7 +1508,7 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): ...@@ -1506,7 +1508,7 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase):
load_func = paddle.jit.load(path) load_func = paddle.jit.load(path)
load_result = load_func(inps) load_result = load_func(inps)
self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy())) np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
...@@ -1546,8 +1548,8 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): ...@@ -1546,8 +1548,8 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
load_result = load_func(inps) load_result = load_func(inps)
self.assertTrue( np.testing.assert_array_equal(origin_result.numpy(),
np.array_equal(origin_result.numpy(), load_result.numpy())) load_result.numpy())
class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
...@@ -1586,7 +1588,7 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): ...@@ -1586,7 +1588,7 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
load_func = paddle.jit.load(path) load_func = paddle.jit.load(path)
load_result = load_func(inps) load_result = load_func(inps)
self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy())) np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadDataParallel(unittest.TestCase): class TestJitSaveLoadDataParallel(unittest.TestCase):
...@@ -1605,10 +1607,12 @@ class TestJitSaveLoadDataParallel(unittest.TestCase): ...@@ -1605,10 +1607,12 @@ class TestJitSaveLoadDataParallel(unittest.TestCase):
x = paddle.to_tensor(np.random.random((1, 784)).astype('float32')) x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
pred = layer(x).numpy() pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy() loaded_pred = loaded_layer(x).numpy()
self.assertTrue( np.testing.assert_array_equal(
np.array_equal(pred, loaded_pred), pred,
msg="Result diff when load and inference:\nlayer result:\n{}\n" \ loaded_pred,
"loaded layer result:\n{}".format(pred, loaded_pred)) err_msg=
'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'
.format(pred, loaded_pred))
def test_jit_save_data_parallel_with_inputspec(self): def test_jit_save_data_parallel_with_inputspec(self):
layer = LinearNetNotDeclarative(784, 1) layer = LinearNetNotDeclarative(784, 1)
......
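Two hunks in the file above attach a diagnostic message through the err_msg keyword. A minimal sketch of how err_msg behaves (plain NumPy, no Paddle dependency): it is ignored when the arrays match and is appended to the detailed failure report otherwise.

import numpy as np

x = np.arange(4)

# Matching arrays: the call returns None and err_msg is never rendered.
np.testing.assert_array_equal(x, x.copy(), err_msg='never shown')

# Mismatching arrays: the AssertionError carries both the element-wise
# report and the custom err_msg text.
try:
    np.testing.assert_array_equal(x, x + 1, err_msg='custom context')
except AssertionError as e:
    assert 'custom context' in str(e)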
...@@ -237,8 +237,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): ...@@ -237,8 +237,8 @@ class TestLambOpMultiPrecision(unittest.TestCase):
if multi_precision: if multi_precision:
params[0] = np.array(params[0]) params[0] = np.array(params[0])
params[1] = np.array(params[1]) params[1] = np.array(params[1])
self.assertTrue( np.testing.assert_array_equal(params[0],
np.array_equal(params[0], params[1].astype(np.float16))) params[1].astype(np.float16))
return params[0].astype(np.float32) return params[0].astype(np.float32)
else: else:
self.assertTrue(params[0] is not None) self.assertTrue(params[0] is not None)
...@@ -259,9 +259,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): ...@@ -259,9 +259,8 @@ class TestLambOpMultiPrecision(unittest.TestCase):
fetch_list=[weight, bias]) fetch_list=[weight, bias])
weight_np = weight_np.astype('float32') weight_np = weight_np.astype('float32')
bias_np = bias_np.astype('float32') bias_np = bias_np.astype('float32')
self.assertTrue(np.array_equal(weight_np, np.testing.assert_array_equal(weight_np, get_parameter(weight))
get_parameter(weight))) np.testing.assert_array_equal(bias_np, get_parameter(bias))
self.assertTrue(np.array_equal(bias_np, get_parameter(bias)))
return weight_np, bias_np return weight_np, bias_np
@switch_to_static_graph @switch_to_static_graph
......
...@@ -381,7 +381,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): ...@@ -381,7 +381,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase):
x_np, weight_np, bias_np, 'float32') x_np, weight_np, bias_np, 'float32')
def assert_equal(x, y): def assert_equal(x, y):
self.assertTrue(np.array_equal(x, y)) np.testing.assert_array_equal(x, y)
assert_equal(y_np_1, y_np_2) assert_equal(y_np_1, y_np_2)
assert_equal(x_g_np_1, x_g_np_2) assert_equal(x_g_np_1, x_g_np_2)
......
...@@ -62,7 +62,7 @@ class TestLoadOp(unittest.TestCase): ...@@ -62,7 +62,7 @@ class TestLoadOp(unittest.TestCase):
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
exe.run(start_prog) exe.run(start_prog)
ret = exe.run(main_prog, fetch_list=[var.name]) ret = exe.run(main_prog, fetch_list=[var.name])
self.assertTrue(np.array_equal(self.ones, ret[0])) np.testing.assert_array_equal(self.ones, ret[0])
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -64,7 +64,7 @@ class TestLoadOpXpu(unittest.TestCase): ...@@ -64,7 +64,7 @@ class TestLoadOpXpu(unittest.TestCase):
exe = fluid.Executor(fluid.XPUPlace(0)) exe = fluid.Executor(fluid.XPUPlace(0))
exe.run(start_prog) exe.run(start_prog)
ret = exe.run(main_prog, fetch_list=[var.name]) ret = exe.run(main_prog, fetch_list=[var.name])
self.assertTrue(np.array_equal(self.ones, ret[0])) np.testing.assert_array_equal(self.ones, ret[0])
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -123,7 +123,7 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): ...@@ -123,7 +123,7 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
def check_load_state_dict(self, orig_dict, load_dict): def check_load_state_dict(self, orig_dict, load_dict):
for var_name, value in six.iteritems(orig_dict): for var_name, value in six.iteritems(orig_dict):
self.assertTrue(np.array_equal(value, load_dict[var_name])) np.testing.assert_array_equal(value, load_dict[var_name])
def test_load_default(self): def test_load_default(self):
self.save_dirname = os.path.join( self.save_dirname = os.path.join(
......
This diff is collapsed.
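As a closing reference, a minimal sketch of what the migration buys in test output: the old unittest idiom collapses every mismatch to "False is not true", while the NumPy helper reports the mismatch ratio and both arrays.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 4.0])

# Old idiom: all a test runner ever sees is a bare boolean.
print(np.array_equal(a, b))  # False

# New idiom: the AssertionError message pinpoints the mismatch.
try:
    np.testing.assert_array_equal(a, b)
except AssertionError as e:
    print(e)  # "Mismatched elements: 1 / 3 (33.3%)", plus both arrays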