Unverified commit 93c5c887, authored by Nyakku Shigure, committed by GitHub

[CodeStyle] use np.testing.assert_array_equal instead of self.assertTrue(np.array_equal(...)) (#44947)

* automatically fix

* update comments

* numpy -> np

* self.assertEqual(..., True)

* wrong usage (err_msg=True)

These are not errors introduced by this fix; they are pre-existing misuses of
`self.assertTrue(..., True)`, so the automated rewrite treated the stray `True`
as the positional argument `err_msg`

* some missing fix
Parent f694e991
......@@ -43,9 +43,10 @@ class TestCustomKernelDot(unittest.TestCase):
y = paddle.to_tensor(y_data)
out = paddle.dot(x, y)
self.assertTrue(
np.array_equal(out.numpy(), result),
"custom kernel dot out: {},\n numpy dot out: {}".format(
np.testing.assert_array_equal(
out.numpy(),
result,
err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
out.numpy(), result))
......@@ -72,9 +73,10 @@ class TestCustomKernelDotC(unittest.TestCase):
y = paddle.to_tensor(y_data)
out = paddle.dot(x, y)
self.assertTrue(
np.array_equal(out.numpy(), result),
"custom kernel dot out: {},\n numpy dot out: {}".format(
np.testing.assert_array_equal(
out.numpy(),
result,
err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
out.numpy(), result))
......
......@@ -65,9 +65,10 @@ class TestCustomKernelLoad(unittest.TestCase):
y = paddle.to_tensor(y_data)
out = paddle.dot(x, y)
self.assertTrue(
np.array_equal(out.numpy(), result),
"custom kernel dot out: {},\n numpy dot out: {}".format(
np.testing.assert_array_equal(
out.numpy(),
result,
err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format(
out.numpy(), result))
def tearDown(self):
......
......@@ -51,7 +51,7 @@ class TestContextPool(unittest.TestCase):
x = paddle.ones([2, 2], dtype='float32')
out = custom_ops.context_pool_test(x)
self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
np.testing.assert_array_equal(x.numpy(), out.numpy())
def test_using_context_pool(self):
with _test_eager_guard():
......
......@@ -66,7 +66,7 @@ class TestJitCustomAttrs(unittest.TestCase):
out.stop_gradient = False
out.backward()
self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
np.testing.assert_array_equal(x.numpy(), out.numpy())
def test_attr_value(self):
with _test_eager_guard():
......@@ -85,7 +85,7 @@ class TestJitCustomAttrs(unittest.TestCase):
out.stop_gradient = False
out.backward()
self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
np.testing.assert_array_equal(x.numpy(), out.numpy())
def test_const_attr_value(self):
with _test_eager_guard():
......
......@@ -112,9 +112,10 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase):
self.axises = [0, 1]
def check_output(self, out, pd_out, name):
self.assertTrue(
np.array_equal(out, pd_out),
"custom op {}: {},\n paddle api {}: {}".format(
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op {}: {},\n paddle api {}: {}'.format(
name, out, name, pd_out))
def func_dynamic(self):
......
......@@ -97,9 +97,10 @@ class TestCustomConjJit(unittest.TestCase):
self.shape = [2, 20, 2, 3]
def check_output(self, out, pd_out, name):
self.assertTrue(
np.array_equal(out, pd_out),
"custom op {}: {},\n paddle api {}: {}".format(
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op {}: {},\n paddle api {}: {}'.format(
name, out, name, pd_out))
def run_dynamic(self, dtype, np_input):
......
......@@ -97,9 +97,10 @@ class TestCustomLinearJit(unittest.TestCase):
self.np_bias = np.ones([4], dtype="float32")
def check_output(self, out, pd_out, name):
self.assertTrue(
np.array_equal(out, pd_out),
"custom op {}: {},\n paddle api {}: {}".format(
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op {}: {},\n paddle api {}: {}'.format(
name, out, name, pd_out))
def test_static(self):
......
......@@ -83,7 +83,7 @@ class TestCustomRawReluOp(unittest.TestCase):
y1_value, y2_value = exe.run(paddle.static.default_main_program(),
feed={x.name: x_np},
fetch_list=[y1, y2])
self.assertTrue(np.array_equal(y1_value, y2_value))
np.testing.assert_array_equal(y1_value, y2_value)
paddle.disable_static()
......
......@@ -121,12 +121,11 @@ class TestDygraphModel(unittest.TestCase):
if _in_legacy_dygraph():
custom_relu_dy2stat_train_out = self.train_model(
use_custom_op=True, dy2stat=True) # for to_static
self.assertTrue(
np.array_equal(origin_relu_train_out,
custom_relu_dy2stat_train_out))
np.testing.assert_array_equal(origin_relu_train_out,
custom_relu_dy2stat_train_out)
self.assertTrue(
np.array_equal(origin_relu_train_out, custom_relu_train_out))
np.testing.assert_array_equal(origin_relu_train_out,
custom_relu_train_out)
# for eval
origin_relu_eval_out = self.eval_model(use_custom_op=False)
......@@ -134,12 +133,11 @@ class TestDygraphModel(unittest.TestCase):
if _in_legacy_dygraph():
custom_relu_dy2stat_eval_out = self.eval_model(
use_custom_op=True, dy2stat=True) # for to_static
self.assertTrue(
np.array_equal(origin_relu_eval_out,
custom_relu_dy2stat_eval_out))
np.testing.assert_array_equal(origin_relu_eval_out,
custom_relu_dy2stat_eval_out)
self.assertTrue(
np.array_equal(origin_relu_eval_out, custom_relu_eval_out))
np.testing.assert_array_equal(origin_relu_eval_out,
custom_relu_eval_out)
def test_train_eval(self):
with _test_eager_guard():
......@@ -243,11 +241,10 @@ class TestStaticModel(unittest.TestCase):
use_custom_op=True,
use_pe=True)
self.assertTrue(
np.array_equal(original_relu_train_out, custom_relu_train_out))
self.assertTrue(
np.array_equal(original_relu_train_pe_out,
custom_relu_train_pe_out))
np.testing.assert_array_equal(original_relu_train_out,
custom_relu_train_out)
np.testing.assert_array_equal(original_relu_train_pe_out,
custom_relu_train_pe_out)
# for eval
original_relu_eval_out = self.eval_model(device,
......@@ -261,11 +258,10 @@ class TestStaticModel(unittest.TestCase):
use_custom_op=True,
use_pe=True)
self.assertTrue(
np.array_equal(original_relu_eval_out, custom_relu_eval_out))
self.assertTrue(
np.array_equal(original_relu_eval_pe_out,
custom_relu_eval_pe_out))
np.testing.assert_array_equal(original_relu_eval_out,
custom_relu_eval_out)
np.testing.assert_array_equal(original_relu_eval_pe_out,
custom_relu_eval_pe_out)
def train_model(self, device, use_custom_op=False, use_pe=False):
# reset random seed
......
......@@ -71,10 +71,11 @@ class TestJITLoad(unittest.TestCase):
out = custom_relu_static(custom_op, device, dtype, x)
pd_out = custom_relu_static(custom_op, device, dtype, x,
False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.
format(out, pd_out))
def func_dynamic(self):
for device in self.devices:
......@@ -87,14 +88,16 @@ class TestJITLoad(unittest.TestCase):
x)
pd_out, pd_x_grad = custom_relu_dynamic(
custom_op, device, dtype, x, False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
self.assertTrue(
np.array_equal(x_grad, pd_x_grad),
"custom op x grad: {},\n paddle api x grad: {}".format(
x_grad, pd_x_grad))
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.
format(out, pd_out))
np.testing.assert_array_equal(
x_grad,
pd_x_grad,
err_msg='custom op x grad: {},\n paddle api x grad: {}'.
format(x_grad, pd_x_grad))
def test_dynamic(self):
with _test_eager_guard():
......
......@@ -224,10 +224,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
out = custom_relu_static(custom_op, device, dtype, x)
pd_out = custom_relu_static(custom_op, device, dtype, x,
False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.
format(out, pd_out))
def test_static_pe(self):
for device in self.devices:
......@@ -239,10 +240,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
out = custom_relu_static_pe(custom_op, device, dtype, x)
pd_out = custom_relu_static_pe(custom_op, device, dtype, x,
False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.
format(out, pd_out))
def func_dynamic(self):
for device in self.devices:
......@@ -255,14 +257,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
x)
pd_out, pd_x_grad = custom_relu_dynamic(
custom_op, device, dtype, x, False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
self.assertTrue(
np.array_equal(x_grad, pd_x_grad),
"custom op x grad: {},\n paddle api x grad: {}".format(
x_grad, pd_x_grad))
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.
format(out, pd_out))
np.testing.assert_array_equal(
x_grad,
pd_x_grad,
err_msg='custom op x grad: {},\n paddle api x grad: {}'.
format(x_grad, pd_x_grad))
def test_dynamic(self):
with _test_eager_guard():
......@@ -286,10 +290,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
predict_infer = exe.run(inference_program,
feed={feed_target_names[0]: np_data},
fetch_list=fetch_targets)
self.assertTrue(
np.array_equal(predict, predict_infer),
"custom op predict: {},\n custom op infer predict: {}".
format(predict, predict_infer))
np.testing.assert_array_equal(
predict,
predict_infer,
err_msg='custom op predict: {},\n custom op infer predict: {}'
.format(predict, predict_infer))
paddle.disable_static()
def test_static_save_and_run_inference_predictor(self):
......@@ -331,14 +336,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
self.custom_ops[0], device, dtype, x)
pd_out, pd_dx_grad = custom_relu_double_grad_dynamic(
self.custom_ops[0], device, dtype, x, False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.format(
out, pd_out))
self.assertTrue(
np.array_equal(dx_grad, pd_dx_grad),
"custom op dx grad: {},\n paddle api dx grad: {}".format(
dx_grad, pd_dx_grad))
np.testing.assert_array_equal(
dx_grad,
pd_dx_grad,
err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.
format(dx_grad, pd_dx_grad))
def test_with_dataloader(self):
for device in self.devices:
......@@ -357,9 +364,10 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
for batch_id, (image, _) in enumerate(train_loader()):
out = self.custom_ops[0](image)
pd_out = paddle.nn.functional.relu(image)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
np.testing.assert_array_equal(
out,
pd_out,
err_msg='custom op out: {},\n paddle api out: {}'.format(
out, pd_out))
if batch_id == 5:
......
......@@ -46,9 +46,11 @@ class TestCustomSimpleSliceJit(unittest.TestCase):
x = paddle.to_tensor(np_x)
custom_op_out = custom_ops.custom_simple_slice(x, 2, 3)
np_out = np_x[2:3]
self.assertTrue(
np.array_equal(custom_op_out, np_out),
"custom op: {},\n numpy: {}".format(np_out, custom_op_out.numpy()))
np.testing.assert_array_equal(
custom_op_out,
np_out,
err_msg='custom op: {},\n numpy: {}'.format(np_out,
custom_op_out.numpy()))
def test_slice_output(self):
with _test_eager_guard():
......
......@@ -47,9 +47,10 @@ class TestJitDispatch(unittest.TestCase):
np_x = x.numpy()
np_out = out.numpy()
self.assertTrue(dtype in str(np_out.dtype))
self.assertTrue(
np.array_equal(np_x, np_out),
"custom op x: {},\n custom op out: {}".format(np_x, np_out))
np.testing.assert_array_equal(
np_x,
np_out,
err_msg='custom op x: {},\n custom op out: {}'.format(np_x, np_out))
def run_dispatch_test(self, func, dtype):
with _test_eager_guard():
......
......@@ -70,14 +70,12 @@ class TestMultiOutputDtypes(unittest.TestCase):
one_int32 = one_int32.numpy()
# Fake_float64
self.assertTrue('float64' in str(zero_float64.dtype))
self.assertTrue(
np.array_equal(zero_float64,
np.zeros([4, 8]).astype('float64')))
np.testing.assert_array_equal(zero_float64,
np.zeros([4, 8]).astype('float64'))
# ZFake_int32
self.assertTrue('int32' in str(one_int32.dtype))
self.assertTrue(
np.array_equal(one_int32,
np.ones([4, 8]).astype('int32')))
np.testing.assert_array_equal(one_int32,
np.ones([4, 8]).astype('int32'))
def test_static(self):
paddle.enable_static()
......
......@@ -144,21 +144,21 @@ class TestCustomCPUPlugin(unittest.TestCase):
place=paddle.CPUPlace())
custom_cpu_tensor = cpu_tensor._copy_to(
paddle.CustomPlace('custom_cpu', 0), True)
self.assertTrue(np.array_equal(custom_cpu_tensor, x))
np.testing.assert_array_equal(custom_cpu_tensor, x)
self.assertTrue(custom_cpu_tensor.place.is_custom_place())
# custom -> custom
another_custom_cpu_tensor = custom_cpu_tensor._copy_to(
paddle.CustomPlace('custom_cpu', 0), True)
self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
np.testing.assert_array_equal(another_custom_cpu_tensor, x)
self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())
# custom -> cpu
another_cpu_tensor = custom_cpu_tensor._copy_to(paddle.CPUPlace(), True)
self.assertTrue(np.array_equal(another_cpu_tensor, x))
np.testing.assert_array_equal(another_cpu_tensor, x)
self.assertTrue(another_cpu_tensor.place.is_cpu_place())
# custom -> custom self
another_custom_cpu_tensor = another_custom_cpu_tensor._copy_to(
paddle.CustomPlace('custom_cpu', 0), True)
self.assertTrue(np.array_equal(another_custom_cpu_tensor, x))
np.testing.assert_array_equal(another_custom_cpu_tensor, x)
self.assertTrue(another_custom_cpu_tensor.place.is_custom_place())
def _test_fallback_kernel(self):
......@@ -168,7 +168,7 @@ class TestCustomCPUPlugin(unittest.TestCase):
x = paddle.to_tensor([5, 4, 3], 'int16')
y = paddle.to_tensor([1, 2, 3], 'int16')
z = paddle.add(x, y)
self.assertTrue(np.array_equal(z, r))
np.testing.assert_array_equal(z, r)
def tearDown(self):
del os.environ['CUSTOM_DEVICE_ROOT']
......
......@@ -617,9 +617,9 @@ class TestGenerateProposals(LayerTest):
roi_probs_dy = roi_probs.numpy()
rois_num_dy = rois_num.numpy()
self.assertTrue(np.array_equal(np.array(rois_stat), rois_dy))
self.assertTrue(np.array_equal(np.array(roi_probs_stat), roi_probs_dy))
self.assertTrue(np.array_equal(np.array(rois_num_stat), rois_num_dy))
np.testing.assert_array_equal(np.array(rois_stat), rois_dy)
np.testing.assert_array_equal(np.array(roi_probs_stat), roi_probs_dy)
np.testing.assert_array_equal(np.array(rois_num_stat), rois_num_dy)
class TestYoloDetection(unittest.TestCase):
......@@ -837,8 +837,8 @@ class TestCollectFpnPropsals(LayerTest):
fpn_rois_dy = fpn_rois_dy.numpy()
rois_num_dy = rois_num_dy.numpy()
self.assertTrue(np.array_equal(fpn_rois_stat, fpn_rois_dy))
self.assertTrue(np.array_equal(rois_num_stat, rois_num_dy))
np.testing.assert_array_equal(fpn_rois_stat, fpn_rois_dy)
np.testing.assert_array_equal(rois_num_stat, rois_num_dy)
def test_collect_fpn_proposals_error(self):
......@@ -932,7 +932,7 @@ class TestDistributeFpnProposals(LayerTest):
output_dy_np.append(output_np)
for res_stat, res_dy in zip(output_stat_np, output_dy_np):
self.assertTrue(np.array_equal(res_stat, res_dy))
np.testing.assert_array_equal(res_stat, res_dy)
def test_distribute_fpn_proposals_error(self):
program = Program()
......
......@@ -71,11 +71,9 @@ class TestLoDTensor(unittest.TestCase):
correct_recursive_seq_lens)
self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT64)
self.assertEqual(tensor.shape(), [5, 1])
self.assertTrue(
np.array_equal(
np.array(tensor),
np.array([1, 2, 3, 3,
4]).reshape(tensor.shape()).astype('int64')))
np.testing.assert_array_equal(
np.array(tensor),
np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype('int64'))
# Create LoDTensor from numpy array
data = np.random.random([10, 1]).astype('float64')
......@@ -85,7 +83,7 @@ class TestLoDTensor(unittest.TestCase):
recursive_seq_lens)
self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP64)
self.assertEqual(tensor.shape(), [10, 1])
self.assertTrue(np.array_equal(np.array(tensor), data))
np.testing.assert_array_equal(np.array(tensor), data)
# Create LoDTensor from another LoDTensor, they are different instances
new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
......@@ -133,9 +131,9 @@ class TestLoDTensor(unittest.TestCase):
dltensor = tensor._to_dlpack()
tensor_from_dlpack = fluid.core.from_dlpack(dltensor)
self.assertTrue(isinstance(tensor_from_dlpack, fluid.core.Tensor))
self.assertTrue(
np.array_equal(np.array(tensor_from_dlpack),
np.array([[1], [2], [3], [4]]).astype('int')))
np.testing.assert_array_equal(
np.array(tensor_from_dlpack),
np.array([[1], [2], [3], [4]]).astype('int'))
# when build with cuda
if core.is_compiled_with_cuda():
gtensor = fluid.create_lod_tensor(
......@@ -144,9 +142,9 @@ class TestLoDTensor(unittest.TestCase):
gdltensor = gtensor._to_dlpack()
gtensor_from_dlpack = fluid.core.from_dlpack(gdltensor)
self.assertTrue(isinstance(gtensor_from_dlpack, fluid.core.Tensor))
self.assertTrue(
np.array_equal(np.array(gtensor_from_dlpack),
np.array([[1], [2], [3], [4]]).astype('int')))
np.testing.assert_array_equal(
np.array(gtensor_from_dlpack),
np.array([[1], [2], [3], [4]]).astype('int'))
def test_as_type(self):
tensor = fluid.create_lod_tensor(
......
......@@ -164,7 +164,7 @@ class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase):
net = ShapeLayer()
out = net(x)
self.assertTrue(np.array_equal(out.numpy(), x.numpy()))
np.testing.assert_array_equal(out.numpy(), x.numpy())
class TestIfElseNoValue(unittest.TestCase):
......
......@@ -36,7 +36,7 @@ class TestDeepCopy(unittest.TestCase):
self.assertFalse(isinstance(net.forward, StaticFunction))
self.assertTrue(id(copy_net), id(copy_net.forward.__self__))
self.assertTrue(np.array_equal(src_out.numpy(), copy_out.numpy()))
np.testing.assert_array_equal(src_out.numpy(), copy_out.numpy())
def test_func(self):
st_foo = paddle.jit.to_static(foo)
......@@ -48,7 +48,7 @@ class TestDeepCopy(unittest.TestCase):
new_foo = deepcopy(st_foo)
self.assertFalse(isinstance(new_foo, StaticFunction))
new_out = new_foo(x)
self.assertTrue(np.array_equal(st_out.numpy(), new_out.numpy()))
np.testing.assert_array_equal(st_out.numpy(), new_out.numpy())
if __name__ == "__main__":
......
......@@ -201,7 +201,7 @@ class TestPruneUnusedParamInProgram(unittest.TestCase):
model.eval()
input_ids = paddle.to_tensor(input_ids)
out = model(input_ids)
self.assertTrue(np.array_equal(out.numpy(), [[15, 11]]))
np.testing.assert_array_equal(out.numpy(), [[15, 11]])
if __name__ == '__main__':
......
......@@ -82,7 +82,7 @@ class TestRollBackPlainFunction(unittest.TestCase):
dy_out = st_foo(x)
self.assertTrue(func_to_source_code(foo) == func_to_source_code(st_foo))
self.assertTrue(np.array_equal(st_out.numpy(), dy_out.numpy()))
np.testing.assert_array_equal(st_out.numpy(), dy_out.numpy())
class TestRollBackNet(unittest.TestCase):
......@@ -111,15 +111,15 @@ class TestRollBackNet(unittest.TestCase):
self.assertFalse(isinstance(net.forward, StaticFunction))
self.assertFalse("true_fn" in func_to_source_code(net.sub.forward))
dy_fwd_out = net(x)
self.assertTrue(np.array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy()))
np.testing.assert_array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy())
# rollback infer into original dygraph method
net.infer.rollback()
self.assertFalse(isinstance(net.infer, StaticFunction))
self.assertFalse("true_fn" in func_to_source_code(net.sub.forward))
dy_infer_out = net.infer(x)
self.assertTrue(
np.array_equal(st_infer_out.numpy(), dy_infer_out.numpy()))
np.testing.assert_array_equal(st_infer_out.numpy(),
dy_infer_out.numpy())
if __name__ == "__main__":
......
......@@ -208,8 +208,8 @@ class TestSliceSupplementSpecialCase(unittest.TestCase):
out = exe.run(prog, feed={'x': array}, fetch_list=[z1, z2])
self.assertTrue(np.array_equal(out[0], array[::2]))
self.assertTrue(np.array_equal(out[1], array[::-2]))
np.testing.assert_array_equal(out[0], array[::2])
np.testing.assert_array_equal(out[1], array[::-2])
def test_static_slice_step_dygraph2static(self):
paddle.disable_static()
......@@ -225,10 +225,10 @@ class TestSliceSupplementSpecialCase(unittest.TestCase):
input_spec=[InputSpec(shape=[None, 4, 4])])
static_result = sfunc(inps)
self.assertTrue(
np.array_equal(origin_result[0].numpy(), static_result[0].numpy()))
self.assertTrue(
np.array_equal(origin_result[1].numpy(), static_result[1].numpy()))
np.testing.assert_array_equal(origin_result[0].numpy(),
static_result[0].numpy())
np.testing.assert_array_equal(origin_result[1].numpy(),
static_result[1].numpy())
class TestPaddleStridedSlice(unittest.TestCase):
......@@ -268,10 +268,8 @@ class TestPaddleStridedSlice(unittest.TestCase):
ends=e2,
strides=stride2)
self.assertTrue(
np.array_equal(
sl.numpy(), array[s2[0]:e2[0]:stride2[0],
s2[1]:e2[1]:stride2[1]]))
np.testing.assert_array_equal(
sl.numpy(), array[s2[0]:e2[0]:stride2[0], s2[1]:e2[1]:stride2[1]])
array = np.arange(6 * 7 * 8).reshape((6, 7, 8))
pt = paddle.to_tensor(array)
......@@ -285,9 +283,10 @@ class TestPaddleStridedSlice(unittest.TestCase):
strides=stride2)
array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]]
self.assertTrue(
np.array_equal(sl.numpy(), array_slice),
msg="paddle.strided_slice:\n {} \n numpy slice:\n{}".format(
np.testing.assert_array_equal(
sl.numpy(),
array_slice,
err_msg='paddle.strided_slice:\n {} \n numpy slice:\n{}'.format(
sl.numpy(), array_slice))
......
......@@ -97,9 +97,9 @@ class TestCompatibility(unittest.TestCase):
for x, y in zip(gt, res):
if isinstance(x, list):
for tx, ty in zip(x, y):
self.assertTrue(np.array_equal(tx, ty))
np.testing.assert_array_equal(tx, ty)
elif isinstance(x, np.ndarray):
self.assertTrue(np.array_equal(tx, ty))
np.testing.assert_array_equal(tx, ty)
else:
raise Exception("Not Implement!")
......
......@@ -261,7 +261,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
res = self.run_new_executor(feed)
gt = self.run_raw_executor(feed)
for x, y in zip(gt, res):
self.assertTrue(np.array_equal(x, y))
np.testing.assert_array_equal(x, y)
def test_with_error(self):
feed = [{'a': np.ones([2, 2], dtype="float32")}]
......@@ -277,7 +277,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
res = self.run_new_executor(feed, use_compiled=True)
gt = self.run_raw_executor(feed, use_compiled=True)
for x, y in zip(gt, res):
self.assertTrue(np.array_equal(x, y))
np.testing.assert_array_equal(x, y)
def test_compiled_program_convert_graph_to_program(self):
data = np.ones([2, 2], dtype="float32")
......@@ -286,7 +286,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase):
res = self.run_new_executor(feed, use_compiled=True)
gt = self.run_raw_executor(feed, use_compiled=True)
for x, y in zip(gt, res):
self.assertTrue(np.array_equal(x, y))
np.testing.assert_array_equal(x, y)
def test_empty_program(self):
program = paddle.static.Program()
......
......@@ -185,7 +185,7 @@ class TestScaleApiStatic(unittest.TestCase):
exe = paddle.static.Executor(place=paddle.CPUPlace())
out = exe.run(main_prog, feed={"x": input}, fetch_list=[out])
self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True)
np.testing.assert_array_equal(out[0], input * 2.0 + 3.0)
class TestScaleInplaceApiStatic(TestScaleApiStatic):
......@@ -204,7 +204,7 @@ class TestScaleApiDygraph(unittest.TestCase):
input = np.random.random([2, 25]).astype("float32")
x = paddle.to_tensor(input)
out = self._executed_api(x, scale=2.0, bias=3.0)
self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True)
np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0)
paddle.enable_static()
......
......@@ -356,13 +356,13 @@ class TestMoveAxis(unittest.TestCase):
exe = paddle.static.Executor()
out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]
self.assertEqual(np.array_equal(out_np, expected), True)
np.testing.assert_array_equal(out_np, expected)
paddle.disable_static()
x = paddle.to_tensor(x_np)
out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
self.assertEqual(out.shape, [4, 2, 5, 7, 3])
self.assertEqual(np.array_equal(out.numpy(), expected), True)
np.testing.assert_array_equal(out.numpy(), expected)
paddle.enable_static()
def test_moveaxis2(self):
......@@ -376,13 +376,13 @@ class TestMoveAxis(unittest.TestCase):
exe = paddle.static.Executor()
out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]
self.assertEqual(np.array_equal(out_np, expected), True)
np.testing.assert_array_equal(out_np, expected)
paddle.disable_static()
x = paddle.to_tensor(x_np)
out = x.moveaxis(-2, -1)
self.assertEqual(out.shape, [2, 5, 3])
self.assertEqual(np.array_equal(out.numpy(), expected), True)
np.testing.assert_array_equal(out.numpy(), expected)
paddle.enable_static()
def test_error(self):
......
......@@ -1048,12 +1048,13 @@ class OpTest(unittest.TestCase):
str(expect_out) + "\n" + "But Got" + str(actual_out) +
" in class " + self.__class__.__name__)
else:
self.assertTrue(
np.array_equal(expect_out, actual_out),
"Output (" + name + ") has diff at " + str(place) +
" when using and not using inplace" + "\nExpect " +
str(expect_out) + "\n" + "But Got" + str(actual_out) +
" in class " + self.__class__.__name__ + '\n')
np.testing.assert_array_equal(
expect_out,
actual_out,
err_msg='Output (' + name + ') has diff at ' + str(place) +
' when using and not using inplace' + '\nExpect ' +
str(expect_out) + '\n' + 'But Got' + str(actual_out) +
' in class ' + self.__class__.__name__ + '\n')
def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc,
op_grad_to_var):
......
......@@ -51,7 +51,7 @@ class TestSparseSquareOp(unittest.TestCase):
# get and compare result
result_array = np.array(out_selected_rows.get_tensor())
self.assertTrue(np.array_equal(result_array, np.square(np_array)))
np.testing.assert_array_equal(result_array, np.square(np_array))
def test_sparse_acti(self):
places = [core.CPUPlace()]
......
......@@ -214,9 +214,9 @@ class TestAssignOApi(unittest.TestCase):
y = clone_x**3
y.backward()
self.assertTrue(np.array_equal(x, [1, 1]), True)
self.assertTrue(np.array_equal(clone_x.grad.numpy(), [3, 3]), True)
self.assertTrue(np.array_equal(x.grad.numpy(), [3, 3]), True)
np.testing.assert_array_equal(x, [1, 1])
np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3])
np.testing.assert_array_equal(x.grad.numpy(), [3, 3])
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
paddle.enable_static()
......@@ -229,7 +229,7 @@ class TestAssignOApi(unittest.TestCase):
feed={'X': x_np},
fetch_list=[clone_x])[0]
self.assertTrue(np.array_equal(y_np, x_np), True)
np.testing.assert_array_equal(y_np, x_np)
paddle.disable_static()
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import unittest
import numpy
import numpy as np
import op_test
import paddle
......@@ -39,7 +39,7 @@ class TestAssignValueOp(op_test.OpTest):
self.outputs = {"Out": self.value}
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.float32)
self.value = np.random.random(size=(2, 5)).astype(np.float32)
self.attrs["fp32_values"] = [float(v) for v in self.value.flat]
def test_forward(self):
......@@ -49,22 +49,22 @@ class TestAssignValueOp(op_test.OpTest):
class TestAssignValueOp2(TestAssignValueOp):
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
self.value = np.random.random(size=(2, 5)).astype(np.int32)
self.attrs["int32_values"] = [int(v) for v in self.value.flat]
class TestAssignValueOp3(TestAssignValueOp):
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
self.value = np.random.random(size=(2, 5)).astype(np.int64)
self.attrs["int64_values"] = [int(v) for v in self.value.flat]
class TestAssignValueOp4(TestAssignValueOp):
def init_data(self):
self.value = numpy.random.choice(a=[False, True],
size=(2, 5)).astype(numpy.bool)
self.value = np.random.choice(a=[False, True],
size=(2, 5)).astype(np.bool)
self.attrs["bool_values"] = [int(v) for v in self.value.flat]
......@@ -72,7 +72,7 @@ class TestAssignApi(unittest.TestCase):
def setUp(self):
self.init_dtype()
self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype(
self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
self.dtype)
self.place = fluid.CUDAPlace(
0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
......@@ -88,8 +88,10 @@ class TestAssignApi(unittest.TestCase):
exe = fluid.Executor(self.place)
[fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
self.assertTrue(numpy.array_equal(fetched_x, self.value),
"fetch_x=%s val=%s" % (fetched_x, self.value))
np.testing.assert_array_equal(fetched_x,
self.value,
err_msg='fetch_x=%s val=%s' %
(fetched_x, self.value))
self.assertEqual(fetched_x.dtype, self.value.dtype)
......@@ -109,8 +111,8 @@ class TestAssignApi4(TestAssignApi):
def setUp(self):
self.init_dtype()
self.value = numpy.random.choice(a=[False, True],
size=(2, 5)).astype(numpy.bool)
self.value = np.random.choice(a=[False, True],
size=(2, 5)).astype(np.bool)
self.place = fluid.CUDAPlace(
0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
......
......@@ -371,7 +371,7 @@ class TestBuffer(unittest.TestCase):
self.func_test_buffer_state_dict()
def assert_var_base_equal(self, var1, var2):
self.assertTrue(np.array_equal(var1.numpy(), var2.numpy()))
np.testing.assert_array_equal(var1.numpy(), var2.numpy())
class BufferNetWithModification(paddle.nn.Layer):
......@@ -414,8 +414,8 @@ class TestModifiedBuffer(unittest.TestCase):
st_outs = self._run(True)
for i in range(len(dy_outs)):
self.assertTrue(
np.array_equal(dy_outs[i].numpy(), st_outs[i].numpy()))
np.testing.assert_array_equal(dy_outs[i].numpy(),
st_outs[i].numpy())
def test_modified(self):
with _test_eager_guard():
......
......@@ -91,9 +91,8 @@ class TestBeamSearchDecodeOp(unittest.TestCase):
expected_data = np.array(
[0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64")
self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
self.assertTrue(np.array_equal(np.array(sentence_scores),
expected_data))
np.testing.assert_array_equal(np.array(sentence_ids), expected_data)
np.testing.assert_array_equal(np.array(sentence_scores), expected_data)
@unittest.skipIf(not core.is_compiled_with_cuda(),
......
......@@ -90,7 +90,7 @@ class TestRandomValue(unittest.TestCase):
self.assertEqual(np.sum(index1), 8582429431)
self.assertEqual(np.sum(index2), 8581445798)
expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.]
self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
np.testing.assert_array_equal(y[16, 500, 500:510], expect)
x = paddle.to_tensor(x_np, dtype='float32')
y = paddle.bernoulli(x).numpy()
......@@ -99,7 +99,7 @@ class TestRandomValue(unittest.TestCase):
self.assertEqual(np.sum(index1), 8583509076)
self.assertEqual(np.sum(index2), 8582778540)
expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.]
self.assertTrue(np.array_equal(y[16, 500, 500:510], expect))
np.testing.assert_array_equal(y[16, 500, 500:510], expect)
paddle.enable_static()
......
......@@ -118,9 +118,11 @@ class InplaceTestBase(unittest.TestCase):
fetch_val2, = exe.run(compiled_prog,
feed=feed_dict,
fetch_list=[fetch_var])
self.assertTrue(
np.array_equal(fetch_val1, fetch_val2),
"error var name: {}, fetch_val1: {}, fetch_val2: {}"
np.testing.assert_array_equal(
fetch_val1,
fetch_val2,
err_msg=
'error var name: {}, fetch_val1: {}, fetch_val2: {}'
.format(
fetch_var,
fetch_val1[~np.equal(fetch_val1, fetch_val2)],
......@@ -167,13 +169,14 @@ class InplaceTestBase(unittest.TestCase):
fetch_vals.append(fetch_val)
for item in fetch_vals:
self.assertTrue(np.array_equal(fetch_vals[0], item))
self.assertTrue(
np.array_equal(fetch_vals[0], item),
"error var name: {}, fetch_vals[0]: {}, item: {}".
format(fetch_var,
fetch_vals[0][~np.equal(fetch_vals[0], item)],
item[~np.equal(fetch_vals[0], item)]))
np.testing.assert_array_equal(fetch_vals[0], item)
np.testing.assert_array_equal(
fetch_vals[0],
item,
err_msg='error var name: {}, fetch_vals[0]: {}, item: {}'
.format(fetch_var,
fetch_vals[0][~np.equal(fetch_vals[0], item)],
item[~np.equal(fetch_vals[0], item)]))
class CUDAInplaceTest(InplaceTestBase):
......
......@@ -100,7 +100,7 @@ class TestGradientWithPrune(unittest.TestCase):
out = exe.run(main,
feed={'x': np.ones([3]).astype('float32')},
fetch_list=[x1_grad])
self.assertTrue(np.array_equal(out[0], [2., 0., 0.]))
np.testing.assert_array_equal(out[0], [2.0, 0.0, 0.0])
class TestDoubleGradient(unittest.TestCase):
......
......@@ -130,11 +130,10 @@ class TestCastOpEager(unittest.TestCase):
x = paddle.ones([2, 2], dtype="float16")
x.stop_gradient = False
out = paddle.cast(x, "float32")
self.assertTrue(
np.array_equal(out,
np.ones([2, 2]).astype("float32")))
np.testing.assert_array_equal(out,
np.ones([2, 2]).astype('float32'))
out.backward()
self.assertTrue(np.array_equal(x.gradient(), x.numpy()))
np.testing.assert_array_equal(x.gradient(), x.numpy())
self.assertTrue(x.gradient().dtype == np.float16)
......
......@@ -68,7 +68,7 @@ class TestCompiledProgram(unittest.TestCase):
"label": self.label
},
fetch_list=[loss.name])
self.assertTrue(np.array_equal(loss_data[0], self.loss))
np.testing.assert_array_equal(loss_data[0], self.loss)
def test_compiled_program_with_data_parallel(self):
with new_program_scope():
......@@ -90,7 +90,7 @@ class TestCompiledProgram(unittest.TestCase):
"label": self.label
},
fetch_list=[loss.name])
self.assertTrue(np.array_equal(loss_data[0], self.loss))
np.testing.assert_array_equal(loss_data[0], self.loss)
class TestCompiledProgramError(unittest.TestCase):
......
......@@ -447,10 +447,8 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
self.assertTrue(self.out_var.shape[self.axis] == -1)
exe = fluid.Executor(self.place)
res = exe.run(self.program, fetch_list=self.out_var)
self.assertTrue(
np.array_equal(
res[0], np.concatenate([self.x] * self.iter_num,
axis=self.axis)))
np.testing.assert_array_equal(
res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis))
if __name__ == '__main__':
......
......@@ -84,7 +84,7 @@ class TestComplexConjOp(unittest.TestCase):
var_x = paddle.to_tensor(input)
result = paddle.conj(var_x).numpy()
target = np.conj(input)
self.assertTrue(np.array_equal(result, target))
np.testing.assert_array_equal(result, target)
def test_conj_operator(self):
for dtype in self._dtypes:
......@@ -96,7 +96,7 @@ class TestComplexConjOp(unittest.TestCase):
var_x = paddle.to_tensor(input)
result = var_x.conj().numpy()
target = np.conj(input)
self.assertTrue(np.array_equal(result, target))
np.testing.assert_array_equal(result, target)
def test_conj_static_mode(self):
......@@ -118,7 +118,7 @@ class TestComplexConjOp(unittest.TestCase):
exe = static.Executor(place)
out_value = exe.run(feed=input_dict, fetch_list=[out.name])
self.assertTrue(np.array_equal(np_res, out_value[0]))
np.testing.assert_array_equal(np_res, out_value[0])
def test_conj_api_real_number(self):
for dtype in self._dtypes:
......@@ -128,7 +128,7 @@ class TestComplexConjOp(unittest.TestCase):
var_x = paddle.to_tensor(input)
result = paddle.conj(var_x).numpy()
target = np.conj(input)
self.assertTrue(np.array_equal(result, target))
np.testing.assert_array_equal(result, target)
if __name__ == "__main__":
......
......@@ -174,10 +174,10 @@ class TestCUDAGraph(unittest.TestCase):
y_np = y.numpy()
y_np_expected = np.concatenate(xs_np)
self.assertTrue(np.array_equal(y_np, y_np_expected))
np.testing.assert_array_equal(y_np, y_np_expected)
self.assertEqual(len(zs), len(xs_np))
for i, z in enumerate(zs):
self.assertTrue(np.array_equal(z.numpy(), xs_np[i]))
np.testing.assert_array_equal(z.numpy(), xs_np[i])
output_dir = 'cuda_graph_dot_{}'.format(os.getpid())
try:
......@@ -233,8 +233,8 @@ class TestCUDAGraph(unittest.TestCase):
graph.replay()
actual_x = np.array([[i]]).astype(dtype)
actual_y = np.array([[i * i]]).astype(dtype)
self.assertTrue(np.array_equal(actual_x, x.numpy()))
self.assertTrue(np.array_equal(actual_y, y.numpy()))
np.testing.assert_array_equal(actual_x, x.numpy())
np.testing.assert_array_equal(actual_y, y.numpy())
def test_dev_ctx_alloc(self):
if not can_use_cuda_graph():
......
......@@ -68,9 +68,9 @@ class TestSimpleModel(unittest.TestCase):
layer, value2 = self.run_base(func, True, "default")
_, value3 = self.run_base(func, True, "new")
_, value4 = self.run_base(func, True, layer)
self.assertTrue(np.array_equal(value1, value2))
self.assertTrue(np.array_equal(value1, value3))
self.assertTrue(np.array_equal(value1, value4))
np.testing.assert_array_equal(value1, value2)
np.testing.assert_array_equal(value1, value3)
np.testing.assert_array_equal(value1, value4)
def test_layer(self):
self.check(SimpleModel(10, 20))
......
......@@ -128,7 +128,7 @@ class TestStreamGuard(unittest.TestCase):
# kernels to be completed on windows.
s.synchronize()
self.assertTrue(np.array_equal(np.array(c), np.array(d)))
np.testing.assert_array_equal(np.array(c), np.array(d))
def test_stream_guard_default_stream(self):
if paddle.is_compiled_with_cuda():
......
......@@ -31,15 +31,15 @@ class TestCumsumOp(unittest.TestCase):
y = paddle.cumsum(data)
z = np.cumsum(data_np)
self.assertTrue(np.array_equal(z, y.numpy()))
np.testing.assert_array_equal(z, y.numpy())
y = paddle.cumsum(data, axis=0)
z = np.cumsum(data_np, axis=0)
self.assertTrue(np.array_equal(z, y.numpy()))
np.testing.assert_array_equal(z, y.numpy())
y = paddle.cumsum(data, axis=-1)
z = np.cumsum(data_np, axis=-1)
self.assertTrue(np.array_equal(z, y.numpy()))
np.testing.assert_array_equal(z, y.numpy())
y = paddle.cumsum(data, dtype='float64')
self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)
......@@ -49,7 +49,7 @@ class TestCumsumOp(unittest.TestCase):
y = paddle.cumsum(data, axis=-2)
z = np.cumsum(data_np, axis=-2)
self.assertTrue(np.array_equal(z, y.numpy()))
np.testing.assert_array_equal(z, y.numpy())
def run_static(self, use_gpu=False):
with fluid.program_guard(fluid.Program()):
......
......@@ -93,8 +93,8 @@ class TestClass(unittest.TestCase):
L1 = np.array(L1)
L2 = np.array(L2)
self.assertTrue(np.array_equal(I1, I2))
self.assertTrue(np.array_equal(L1, L2))
np.testing.assert_array_equal(I1, I2)
np.testing.assert_array_equal(L1, L2)
batch_id += 1
if break_beforehand and batch_id >= int(
......
......@@ -138,9 +138,8 @@ class TestDygraph(unittest.TestCase):
np.array([[1, 3], [3, 5]]).astype(np.float32))
y1 = fluid.dygraph.to_variable(
np.array([[2, 5], [6, 8]]).astype(np.float32))
self.assertTrue(
np.array_equal(
paddle.dot(x1, y1).numpy(), np.array([[17], [58]])))
np.testing.assert_array_equal(
paddle.dot(x1, y1).numpy(), np.array([[17], [58]]))
class TestComplexDotOp(OpTest):
......
......@@ -1013,10 +1013,9 @@ class TestDropoutBackward(unittest.TestCase):
out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
out.backward()
self.assertTrue(
np.array_equal(
input.gradient(),
self.cal_grad_downscale_in_infer(mask.numpy())))
np.testing.assert_array_equal(
input.gradient(),
self.cal_grad_downscale_in_infer(mask.numpy()))
def test_backward_downscale_in_infer_eager(self):
for place in self.places:
......@@ -1027,10 +1026,9 @@ class TestDropoutBackward(unittest.TestCase):
out, mask = _C_ops.final_state_dropout(
input, None, 0.5, False, "downgrade_in_infer", 0, False)
out.backward()
self.assertTrue(
np.array_equal(
input.gradient(),
self.cal_grad_downscale_in_infer(mask.numpy())))
np.testing.assert_array_equal(
input.gradient(),
self.cal_grad_downscale_in_infer(mask.numpy()))
def test_backward_upscale_train(self):
_enable_legacy_dygraph()
......
......@@ -82,7 +82,7 @@ class TestDynRNNStopGradient(unittest.TestCase):
value2 = build_and_run_program(place, self.batch_size,
self.beam_size, True)
self.assertTrue(np.array_equal(value1, value2))
np.testing.assert_array_equal(value1, value2)
def test_check_main(self):
places = [fluid.CPUPlace()]
......
......@@ -108,11 +108,11 @@ class TestRunProgram(unittest.TestCase):
loss = paddle.mean(out_t)
loss.backward()
self.assertTrue(np.array_equal(np.ones([2, 2]) * 4, out_t.numpy()))
self.assertTrue(
np.array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy()))
self.assertTrue(
np.array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy()))
np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
np.testing.assert_array_equal(
np.ones([2, 4]) * 0.5, x_t.grad.numpy())
np.testing.assert_array_equal(
np.ones([4, 2]) * 0.5, y_t.grad.numpy())
if __name__ == '__main__':
......
......@@ -32,7 +32,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
out_arr = out.numpy()
out_arr_expected = np.add(np_x, np_y)
self.assertTrue(np.array_equal(out_arr, out_arr_expected))
np.testing.assert_array_equal(out_arr, out_arr_expected)
def test_sum(self):
with _test_eager_guard():
......@@ -42,7 +42,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
out = paddle.sum(x, axis=0)
out_arr = out.numpy()
out_arr_expected = np.sum(x_data, axis=0)
self.assertTrue(np.array_equal(out_arr, out_arr_expected))
np.testing.assert_array_equal(out_arr, out_arr_expected)
def test_mm(self):
with _test_eager_guard():
......
......@@ -40,28 +40,28 @@ class EagerStringTensorTestCase(unittest.TestCase):
ST2 = core.eager.StringTensor(shape, "ST2") # constructor 2
self.assertEqual(ST2.name, "ST2")
self.assertEqual(ST2.shape, shape)
self.assertTrue(
np.array_equal(ST2.numpy(), np.empty(shape, dtype=np.unicode_)))
np.testing.assert_array_equal(ST2.numpy(),
np.empty(shape, dtype=np.unicode_))
ST3 = core.eager.StringTensor(self.str_arr, "ST3") # constructor 3
self.assertEqual(ST3.name, "ST3")
self.assertEqual(ST3.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr))
np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
ST4 = core.eager.StringTensor(self.str_arr) # constructor 4
self.assertEqual(ST4.name, "generated_string_tensor_1")
self.assertEqual(ST4.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr))
np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
ST5 = core.eager.StringTensor(ST4) # constructor 5
self.assertEqual(ST5.name, "generated_string_tensor_2")
self.assertEqual(ST5.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST5.numpy(), self.str_arr))
np.testing.assert_array_equal(ST5.numpy(), self.str_arr)
ST6 = core.eager.StringTensor(ST5, "ST6") # constructor 6
self.assertEqual(ST6.name, "ST6")
self.assertEqual(ST6.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST6.numpy(), self.str_arr))
np.testing.assert_array_equal(ST6.numpy(), self.str_arr)
for st in [ST1, ST2, ST3, ST4, ST5, ST6]:
# All StringTensors are on cpu place so far.
......@@ -74,25 +74,25 @@ class EagerStringTensorTestCase(unittest.TestCase):
name="ST1") # constructor 2
self.assertEqual(ST1.name, "ST1")
self.assertEqual(ST1.shape, shape)
self.assertTrue(
np.array_equal(ST1.numpy(), np.empty(shape, dtype=np.unicode_)))
np.testing.assert_array_equal(ST1.numpy(),
np.empty(shape, dtype=np.unicode_))
ST2 = core.eager.StringTensor(self.str_arr,
name="ST2") # constructor 3
self.assertEqual(ST2.name, "ST2")
self.assertEqual(ST2.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST2.numpy(), self.str_arr))
np.testing.assert_array_equal(ST2.numpy(), self.str_arr)
ST3 = core.eager.StringTensor(ST2, name="ST3") # constructor 6
self.assertEqual(ST3.name, "ST3")
self.assertEqual(ST3.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr))
np.testing.assert_array_equal(ST3.numpy(), self.str_arr)
ST4 = core.eager.StringTensor(value=ST2,
name="ST4") # constructor 6
self.assertEqual(ST4.name, "ST4")
self.assertEqual(ST4.shape, list(self.str_arr.shape))
self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr))
np.testing.assert_array_equal(ST4.numpy(), self.str_arr)
for st in [ST1, ST2, ST3, ST4]:
# All StringTensors are on cpu place so far.
self.assertTrue(st.place._equals(core.CPUPlace()))
......
......@@ -206,11 +206,15 @@ class TestElementwiseMinOpFP16(unittest.TestCase):
z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place,
False)
z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True)
self.assertTrue(np.array_equal(z_1, z_2), "{} vs {}".format(z_1, z_2))
self.assertTrue(np.array_equal(x_g_1, x_g_2),
"{} vs {}".format(x_g_1, x_g_2))
self.assertTrue(np.array_equal(y_g_1, y_g_2),
"{} vs {}".format(y_g_1, y_g_2))
np.testing.assert_array_equal(z_1,
z_2,
err_msg='{} vs {}'.format(z_1, z_2))
np.testing.assert_array_equal(x_g_1,
x_g_2,
err_msg='{} vs {}'.format(x_g_1, x_g_2))
np.testing.assert_array_equal(y_g_1,
y_g_2,
err_msg='{} vs {}'.format(y_g_1, y_g_2))
def test_main(self):
self.check_main((13, 17), (13, 17))
......
......@@ -210,9 +210,9 @@ class TestElementwisePowGradOpInt(unittest.TestCase):
y.stop_gradient = False
res = x**y
res.backward()
self.assertTrue(np.array_equal(res.gradient(), self.grad_res))
self.assertTrue(np.array_equal(x.gradient(), self.grad_x))
self.assertTrue(np.array_equal(y.gradient(), self.grad_y))
np.testing.assert_array_equal(res.gradient(), self.grad_res)
np.testing.assert_array_equal(x.gradient(), self.grad_x)
np.testing.assert_array_equal(y.gradient(), self.grad_y)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False})
......
......@@ -36,7 +36,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
for p in self.get_places():
grad_value1 = self.run_program(p, stop_gradient=False)
grad_value2 = self.run_program(p, stop_gradient=True)
self.assertTrue(np.array_equal(grad_value1, grad_value2))
np.testing.assert_array_equal(grad_value1, grad_value2)
def run_program(self, place, stop_gradient=False):
np.random.seed(1)
......
......@@ -50,7 +50,7 @@ class TestCheckFetchList(unittest.TestCase):
fetch_list=[self.fetch_list], # support single list/tuple
return_numpy=True)
self.assertTrue(np.array_equal(res[0], self.expected))
np.testing.assert_array_equal(res[0], self.expected)
def test_with_error(self):
with self.assertRaises(TypeError):
......
......@@ -65,7 +65,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest):
add_out1 = np.array(add_out[0])
mul_out = self.calc_mul_out(place, parallel)
add_out2 = np.array(add_out[0])
self.assertTrue(np.array_equal(add_out1, add_out2))
np.testing.assert_array_equal(add_out1, add_out2)
class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase):
......@@ -108,7 +108,7 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase):
add_out1 = np.array(add_out[0])
sub_out = self.calc_sub_out(place, parallel)
add_out2 = np.array(add_out[0])
self.assertTrue(np.array_equal(add_out1, add_out2))
np.testing.assert_array_equal(add_out1, add_out2)
if __name__ == '__main__':
......
......@@ -272,10 +272,8 @@ class TestExpandDygraphAPI(unittest.TestCase):
c = paddle.fluid.layers.expand(a,
expand_times=paddle.to_tensor(
[2, 3], dtype='int32'))
self.assertTrue(
np.array_equal(b.numpy(), np.tile(a.numpy(), [2, 3])))
self.assertTrue(
np.array_equal(c.numpy(), np.tile(a.numpy(), [2, 3])))
np.testing.assert_array_equal(b.numpy(), np.tile(a.numpy(), [2, 3]))
np.testing.assert_array_equal(c.numpy(), np.tile(a.numpy(), [2, 3]))
if __name__ == "__main__":
......
......@@ -277,11 +277,11 @@ class TestExpandV2DygraphAPI(unittest.TestCase):
np_array = np.array([2, 5])
expand_2 = paddle.expand(a, shape=np_array)
self.assertTrue(
np.array_equal(egr_expand_1.numpy(), egr_expand_2.numpy()))
self.assertTrue(np.array_equal(expand_1.numpy(), expand_2.numpy()))
self.assertTrue(
np.array_equal(expand_1.numpy(), egr_expand_1.numpy()))
np.testing.assert_array_equal(egr_expand_1.numpy(),
egr_expand_2.numpy())
np.testing.assert_array_equal(expand_1.numpy(), expand_2.numpy())
np.testing.assert_array_equal(expand_1.numpy(),
egr_expand_1.numpy())
if __name__ == "__main__":
......
......@@ -94,7 +94,7 @@ class TestExponentialAPI(unittest.TestCase):
self.assertTrue(np.min(y.numpy()) >= 0)
y.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10])))
np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10]))
paddle.enable_static()
def test_fixed_random_number(self):
......
......@@ -173,7 +173,7 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
res_1 = run_program(-1)
res_2 = run_program(2)
self.assertTrue(np.array_equal(res_1, res_2))
np.testing.assert_array_equal(res_1, res_2)
class TestFCOpError(unittest.TestCase):
......
......@@ -17,14 +17,14 @@ from __future__ import print_function
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import op_test
import numpy
import numpy as np
import unittest
class TestFetchVar(unittest.TestCase):
def set_input(self):
self.val = numpy.array([1, 3, 5]).astype(numpy.int32)
self.val = np.array([1, 3, 5]).astype(np.int32)
def test_fetch_var(self):
self.set_input()
......@@ -33,15 +33,17 @@ class TestFetchVar(unittest.TestCase):
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program(), feed={}, fetch_list=[])
fetched_x = fluid.executor._fetch_var("x")
self.assertTrue(numpy.array_equal(fetched_x, self.val),
"fetch_x=%s val=%s" % (fetched_x, self.val))
np.testing.assert_array_equal(fetched_x,
self.val,
err_msg='fetch_x=%s val=%s' %
(fetched_x, self.val))
self.assertEqual(fetched_x.dtype, self.val.dtype)
class TestFetchNullVar(TestFetchVar):
def set_input(self):
self.val = numpy.array([]).astype(numpy.int32)
self.val = np.array([]).astype(np.int32)
if __name__ == '__main__':
......
......@@ -110,7 +110,7 @@ class TestFillAnyInplace(unittest.TestCase):
y = 2 * x
y.fill_(1)
y.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10])))
np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10]))
if __name__ == "__main__":
......
......@@ -127,7 +127,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase):
result_array = np.array(out.get_tensor())
full_array = np.full((123, 92), 3.8, 'float32')
self.assertTrue(np.array_equal(result_array, full_array))
np.testing.assert_array_equal(result_array, full_array)
def test_fill_constant_with_selected_rows(self):
places = [core.CPUPlace()]
......
......@@ -78,7 +78,7 @@ class TestFillOp3(unittest.TestCase):
result_array = np.array(out)
full_array = np.array(val, 'float32')
self.assertTrue(np.array_equal(result_array, full_array))
np.testing.assert_array_equal(result_array, full_array)
def test_fill_op(self):
places = [core.CPUPlace()]
......
......@@ -97,19 +97,19 @@ class TestFusedMatmulBias(unittest.TestCase):
z = fused_matmul_bias(x, y, bias, trans_x, trans_y)
z_np = matmul(x_np, y_np, bias_np, trans_x, trans_y)
self.assertTrue(np.array_equal(z.numpy(), z_np))
np.testing.assert_array_equal(z.numpy(), z_np)
z_grad_np = self.rand_data(z_np.shape, dtype)
paddle.autograd.backward(z, grad_tensors=[paddle.to_tensor(z_grad_np)])
x_grad_np, y_grad_np, bias_grad_np = matmul_grad(
x_np, y_np, bias_np, z_grad_np, trans_x, trans_y)
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_np))
np.testing.assert_array_equal(x.grad.numpy(), x_grad_np)
self.assertEqual(y_grad_np.shape, y_np.shape)
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_np))
np.testing.assert_array_equal(y.grad.numpy(), y_grad_np)
if need_bias:
self.assertTrue(np.array_equal(bias.grad.numpy(), bias_grad_np))
np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np)
else:
self.assertTrue(bias_grad_np is None)
......@@ -138,7 +138,7 @@ class TestFusedLinear(unittest.TestCase):
linear = FusedLinear(40, 50, transpose_weight=transpose)
y1 = linear(x)
y2 = fused_linear(x, linear.weight, linear.bias, transpose)
self.assertTrue(np.array_equal(y1.numpy(), y2.numpy()))
np.testing.assert_array_equal(y1.numpy(), y2.numpy())
def test_non_transpose(self):
self.check_fused_linear(False)
......
......@@ -341,7 +341,7 @@ class API_TestDygraphGather(unittest.TestCase):
gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0]
return gpu_value
self.assertTrue(np.array_equal(test_dygraph(), test_static_graph()))
np.testing.assert_array_equal(test_dygraph(), test_static_graph())
class TestGathertError(unittest.TestCase):
......
......@@ -324,8 +324,8 @@ class TestAmpScaler(unittest.TestCase):
for param in model.parameters():
# param not update when tensor contains nan or inf
self.assertTrue(
np.array_equal(param.numpy(), params_init[param.name]))
np.testing.assert_array_equal(param.numpy(),
params_init[param.name])
def test_nan_inf(self):
self.nan_inf()
......@@ -974,7 +974,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
fetch_list=fetch_targets)
print("pred.numpy()", pred.numpy())
print("result", results[0])
self.assertTrue(np.array_equal(pred.numpy(), results[0]))
np.testing.assert_array_equal(pred.numpy(), results[0])
paddle.disable_static()
def test_inference_save_load(self):
......
......@@ -323,8 +323,8 @@ class TestAmpScaler(unittest.TestCase):
for param in model.parameters():
# param not update when tensor contains nan or inf
self.assertTrue(
np.array_equal(param.numpy(), params_init[param.name]))
np.testing.assert_array_equal(param.numpy(),
params_init[param.name])
def test_nan_inf(self):
self.nan_inf()
......@@ -965,7 +965,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
fetch_list=fetch_targets)
print("pred.numpy()", pred.numpy())
print("result", results[0])
self.assertTrue(np.array_equal(pred.numpy(), results[0]))
np.testing.assert_array_equal(pred.numpy(), results[0])
paddle.disable_static()
def test_inference_save_load(self):
......
......@@ -314,8 +314,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
learning_rate=0.003,
parameter_list=(linear.parameters() + linear2.parameters()))
optimizer.minimize(out2)
self.assertTrue(
np.array_equal(linear2_origin, linear2.weight.numpy()))
np.testing.assert_array_equal(linear2_origin,
linear2.weight.numpy())
self.assertFalse(
np.array_equal(linear_origin, linear.weight.numpy()))
......@@ -344,10 +344,9 @@ class TestImperativeAutoPrune(unittest.TestCase):
learning_rate=0.003,
parameter_list=(linear.parameters() + linear2.parameters()))
optimizer.minimize(out2)
self.assertTrue(
np.array_equal(linear2_origin, linear2.weight.numpy()))
self.assertTrue(np.array_equal(linear_origin,
linear.weight.numpy()))
np.testing.assert_array_equal(linear2_origin,
linear2.weight.numpy())
np.testing.assert_array_equal(linear_origin, linear.weight.numpy())
try:
linear2.weight.gradient()
except ValueError as e:
......
......@@ -159,8 +159,8 @@ class TestImperative(unittest.TestCase):
out.backward()
dy_grad2 = mlp._linear1.weight.gradient()
self.assertFalse(fluid.dygraph.enabled())
self.assertTrue(np.array_equal(dy_out1, dy_out2))
self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
np.testing.assert_array_equal(dy_out1, dy_out2)
np.testing.assert_array_equal(dy_grad1, dy_grad2)
def test_functional_dygraph_context(self):
with _test_eager_guard():
......@@ -190,8 +190,8 @@ class TestImperative(unittest.TestCase):
dy_grad2 = mlp._linear1.weight.gradient()
paddle.enable_static()
self.assertFalse(paddle.in_dynamic_mode())
self.assertTrue(np.array_equal(dy_out1, dy_out2))
self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
np.testing.assert_array_equal(dy_out1, dy_out2)
np.testing.assert_array_equal(dy_grad1, dy_grad2)
def test_functional_paddle_imperative_dygraph_context(self):
with _test_eager_guard():
......@@ -229,12 +229,12 @@ class TestImperative(unittest.TestCase):
egr_tmp5 = fluid.core.eager.Tensor(value=x)
egr_tmp6 = fluid.core.eager.Tensor(t)
self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
self.assertTrue(np.array_equal(x, egr_tmp3.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp4.numpy()))
self.assertTrue(np.array_equal(x, egr_tmp5.numpy()))
self.assertTrue(np.array_equal(x, egr_tmp6.numpy()))
np.testing.assert_array_equal(x, egr_tmp.numpy())
np.testing.assert_array_equal(y, egr_tmp2.numpy())
np.testing.assert_array_equal(x, egr_tmp3.numpy())
np.testing.assert_array_equal(y, egr_tmp4.numpy())
np.testing.assert_array_equal(x, egr_tmp5.numpy())
np.testing.assert_array_equal(x, egr_tmp6.numpy())
else:
tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())
tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())
......@@ -243,12 +243,12 @@ class TestImperative(unittest.TestCase):
tmp5 = fluid.core.VarBase(value=x)
tmp6 = fluid.core.VarBase(t)
self.assertTrue(np.array_equal(x, tmp.numpy()))
self.assertTrue(np.array_equal(y, tmp2.numpy()))
self.assertTrue(np.array_equal(x, tmp3.numpy()))
self.assertTrue(np.array_equal(y, tmp4.numpy()))
self.assertTrue(np.array_equal(x, tmp5.numpy()))
self.assertTrue(np.array_equal(x, tmp6.numpy()))
np.testing.assert_array_equal(x, tmp.numpy())
np.testing.assert_array_equal(y, tmp2.numpy())
np.testing.assert_array_equal(x, tmp3.numpy())
np.testing.assert_array_equal(y, tmp4.numpy())
np.testing.assert_array_equal(x, tmp5.numpy())
np.testing.assert_array_equal(x, tmp6.numpy())
def test_create_varbase(self):
with fluid.dygraph.guard():
......@@ -479,10 +479,10 @@ class TestImperative(unittest.TestCase):
feed={inp.name: np_inp},
fetch_list=[x.name, param_grads[1].name])
self.assertTrue(np.array_equal(dy_out, static_out))
self.assertTrue(np.array_equal(dy_grad, static_grad))
self.assertTrue(np.array_equal(dy_out2, static_out))
self.assertTrue(np.array_equal(dy_grad2, static_grad))
np.testing.assert_array_equal(dy_out, static_out)
np.testing.assert_array_equal(dy_grad, static_grad)
np.testing.assert_array_equal(dy_out2, static_out)
np.testing.assert_array_equal(dy_grad2, static_grad)
def test_layer_in_out(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
......@@ -577,9 +577,9 @@ class TestImperative(unittest.TestCase):
loss2 = x * z
loss1.backward(retain_graph=True)
loss2.backward(retain_graph=True)
self.assertTrue(np.array_equal(x.grad.numpy(), [23.]))
self.assertTrue(np.array_equal(y.grad.numpy(), [25.]))
self.assertTrue(np.array_equal(z.grad.numpy(), [5.]))
np.testing.assert_array_equal(x.grad.numpy(), [23.0])
np.testing.assert_array_equal(y.grad.numpy(), [25.0])
np.testing.assert_array_equal(z.grad.numpy(), [5.0])
x.clear_grad()
y.clear_grad()
z.clear_grad()
......@@ -592,13 +592,13 @@ class TestImperative(unittest.TestCase):
loss = fun(x, y, z)
loss.backward(retain_graph=True)
# x.grad = 2*x*y + z + 2*y = 27
self.assertTrue(np.array_equal(x.grad.numpy(), [27]))
np.testing.assert_array_equal(x.grad.numpy(), [27])
loss.backward(retain_graph=True)
self.assertTrue(np.array_equal(x.grad.numpy(), [54]))
np.testing.assert_array_equal(x.grad.numpy(), [54])
loss.backward()
self.assertTrue(np.array_equal(x.grad.numpy(), [81]))
np.testing.assert_array_equal(x.grad.numpy(), [81])
with self.assertRaises(RuntimeError):
loss.backward()
......@@ -608,8 +608,8 @@ class TestImperative(unittest.TestCase):
dx = paddle.grad([loss1], x, create_graph=True)[0]
loss = loss1 + loss2 + dx
loss.backward()
self.assertTrue(np.array_equal(dx.grad.numpy(), [1]))
self.assertTrue(np.array_equal(x.grad.numpy(), [108]))
np.testing.assert_array_equal(dx.grad.numpy(), [1])
np.testing.assert_array_equal(x.grad.numpy(), [108])
def test_mlp(sort_sum_gradient):
fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})
......@@ -641,7 +641,7 @@ class TestImperative(unittest.TestCase):
loss = mlp1(x)
loss.backward()
self.assertTrue(np.array_equal(loss.grad.numpy(), [1]))
np.testing.assert_array_equal(loss.grad.numpy(), [1])
self.assertTrue(
np.allclose(mlp1._linear1.weight.grad.numpy(),
expected_weight1_grad))
......@@ -656,7 +656,7 @@ class TestImperative(unittest.TestCase):
expected_bias2_grad))
mlp2.clear_gradients()
self.assertTrue(np.array_equal(clear_loss.grad.numpy(), [1]))
np.testing.assert_array_equal(clear_loss.grad.numpy(), [1])
if ((batch_id + 1) % 10) % 2 == 0:
mlp1.clear_gradients()
expected_weight1_grad = 0.
......@@ -785,14 +785,14 @@ class TestImperative(unittest.TestCase):
param_grads[1][1].name, param_grads[2][1].name
])
self.assertTrue(np.array_equal(dy_out, static_out))
self.assertTrue(np.array_equal(dy_grad_h2o, static_grad_h2o))
self.assertTrue(np.array_equal(dy_grad_h2h, static_grad_h2h))
self.assertTrue(np.array_equal(dy_grad_i2h, static_grad_i2h))
self.assertTrue(np.array_equal(dy_out2, static_out))
self.assertTrue(np.array_equal(dy_grad_h2o2, static_grad_h2o))
self.assertTrue(np.array_equal(dy_grad_h2h2, static_grad_h2h))
self.assertTrue(np.array_equal(dy_grad_i2h2, static_grad_i2h))
np.testing.assert_array_equal(dy_out, static_out)
np.testing.assert_array_equal(dy_grad_h2o, static_grad_h2o)
np.testing.assert_array_equal(dy_grad_h2h, static_grad_h2h)
np.testing.assert_array_equal(dy_grad_i2h, static_grad_i2h)
np.testing.assert_array_equal(dy_out2, static_out)
np.testing.assert_array_equal(dy_grad_h2o2, static_grad_h2o)
np.testing.assert_array_equal(dy_grad_h2h2, static_grad_h2h)
np.testing.assert_array_equal(dy_grad_i2h2, static_grad_i2h)
def test_rnn(self):
with _test_eager_guard():
......@@ -846,7 +846,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np)
res1 = func(a, act="hard_sigmoid")
res2 = fluid.layers.hard_sigmoid(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph1(self):
with _test_eager_guard():
......@@ -875,7 +875,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np)
res1 = func(a, act="sigmoid", use_cudnn=True)
res2 = fluid.layers.sigmoid(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph3(self):
with _test_eager_guard():
......@@ -892,7 +892,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np)
res1 = func(a)
res2 = fluid.layers.relu(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph_use_mkldnn(self):
with _test_eager_guard():
......@@ -911,7 +911,7 @@ class TestDygraphUtils(unittest.TestCase):
finally:
fluid.set_flags({'FLAGS_use_mkldnn': False})
res2 = fluid.layers.relu(a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_activation_in_dygraph_global_use_mkldnn(self):
with _test_eager_guard():
......@@ -937,7 +937,7 @@ class TestDygraphUtils(unittest.TestCase):
a = paddle.to_tensor(a_np)
res1 = func(a, bias=a)
res2 = paddle.add(a, a)
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
np.testing.assert_array_equal(res1.numpy(), res2.numpy())
def test_append_bias_in_dygraph(self):
with _test_eager_guard():
......
......@@ -58,8 +58,8 @@ class TestDataParallelStateDict(unittest.TestCase):
for k, v in single_state.items():
self.assertTrue(k in parallel_state)
self.assertTrue(
np.array_equal(v.numpy(), parallel_state[k].numpy()))
np.testing.assert_array_equal(v.numpy(),
parallel_state[k].numpy())
base_para[k] = v.numpy()
......@@ -75,7 +75,7 @@ class TestDataParallelStateDict(unittest.TestCase):
parallel_state = parallel_mlp.state_dict()
for k, v in parallel_state.items():
self.assertTrue(np.array_equal(v.numpy(), base_para[k]))
np.testing.assert_array_equal(v.numpy(), base_para[k])
parallel_mlp.load_dict(base_para)
......
......@@ -186,9 +186,8 @@ class TestEagerGrad(TestCase):
out4 = paddle.mean(out3)
egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3])
self.assertTrue(
np.array_equal(dout2_record_by_hook[0].numpy(),
np.array([1., 2.])))
np.testing.assert_array_equal(dout2_record_by_hook[0].numpy(),
np.array([1.0, 2.0]))
x1 = paddle.to_tensor([1.0, 2.0])
x1.stop_gradient = False
......@@ -203,8 +202,8 @@ class TestEagerGrad(TestCase):
self.assertEqual(dout2.stop_gradient, egr_dout2.stop_gradient)
self.assertEqual(dout3.stop_gradient, egr_dout3.stop_gradient)
self.assertTrue(np.array_equal(dout2.numpy(), egr_dout2.numpy()))
self.assertTrue(np.array_equal(dout3.numpy(), egr_dout3.numpy()))
np.testing.assert_array_equal(dout2.numpy(), egr_dout2.numpy())
np.testing.assert_array_equal(dout3.numpy(), egr_dout3.numpy())
class TestDygraphDoubleGrad(TestCase):
......@@ -392,15 +391,13 @@ class TestDygraphDoubleGrad(TestCase):
if grad_y is not None:
self.assertTrue(grad_y.stop_gradient)
self.assertTrue(
np.array_equal(grad_y.numpy(),
original_random_grad_y))
np.testing.assert_array_equal(grad_y.numpy(),
original_random_grad_y)
if grad_z is not None:
self.assertTrue(grad_z.stop_gradient)
self.assertTrue(
np.array_equal(grad_z.numpy(),
original_random_grad_z))
np.testing.assert_array_equal(grad_z.numpy(),
original_random_grad_z)
def test_none_one_initial_gradient(self):
with _test_eager_guard():
......@@ -583,7 +580,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
grad_2 = a.gradient()
self.assertTrue(np.array_equal(grad_1, grad_2))
np.testing.assert_array_equal(grad_1, grad_2)
def test_compare(self):
with _test_eager_guard():
......@@ -647,8 +644,8 @@ class TestDoubleGradResNet(TestCase):
g_numpy = g.numpy()
self.assertEqual(list(g_numpy.shape), list(out.shape))
self.assertTrue(np.array_equal(egr_out, out))
self.assertTrue(np.array_equal(egr_g_numpy, g_numpy))
np.testing.assert_array_equal(egr_out, out)
np.testing.assert_array_equal(egr_g_numpy, g_numpy)
@dygraph_guard
def test_resnet_resnet101(self):
......@@ -679,8 +676,8 @@ class TestDoubleGradResNet(TestCase):
g_numpy = g.numpy()
self.assertEqual(list(g_numpy.shape), list(out.shape))
self.assertTrue(np.array_equal(egr_out, out))
self.assertTrue(np.array_equal(egr_g_numpy, g_numpy))
np.testing.assert_array_equal(egr_out, out)
np.testing.assert_array_equal(egr_g_numpy, g_numpy)
class TestDoubleGradBasics(TestCase):
......@@ -705,22 +702,22 @@ class TestDoubleGradBasics(TestCase):
new_x_g.backward()
out_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(out.numpy(), out_ref))
np.testing.assert_array_equal(out.numpy(), out_ref)
new_x_g_ref = np.ones([3, 3]) * 6.0
new_y_g_ref = np.ones([3, 3]) * 6.0
self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref))
self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref))
np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
x_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref))
np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
y_grad_ref = np.ones([3, 3]) * 3.0
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref))
np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
grad_out_grad_ref = np.ones([3, 3]) * 6.0
self.assertTrue(
np.array_equal(grad_out.grad.numpy(), grad_out_grad_ref))
np.testing.assert_array_equal(grad_out.grad.numpy(),
grad_out_grad_ref)
if __name__ == '__main__':
......
......@@ -95,29 +95,27 @@ class Test_Forward_Hook(unittest.TestCase):
forward_pre_hook_handle1 = simplenet.register_forward_pre_hook(
forward_pre_hook1)
outs_pre_hook = simplenet(input, y)
self.assertTrue(
np.array_equal(outs_pre_hook.numpy(), outs_origin1.numpy()))
np.testing.assert_array_equal(outs_pre_hook.numpy(),
outs_origin1.numpy())
# remove forward_pre_hook
forward_pre_hook_handle1.remove()
outs_pre_hook = simplenet(input, y)
self.assertTrue(
np.array_equal(outs_pre_hook.numpy(), outs_origin.numpy()))
np.testing.assert_array_equal(outs_pre_hook.numpy(),
outs_origin.numpy())
# register forward_post_hook
forward_post_hook_handle1 = simplenet.register_forward_post_hook(
forward_post_hook1)
outs_forward_hook = simplenet(input, y)
self.assertTrue(
np.array_equal(outs_forward_hook.numpy(),
outs_origin.numpy() * 2))
np.testing.assert_array_equal(outs_forward_hook.numpy(),
outs_origin.numpy() * 2)
# remove forward_post_hook
forward_post_hook_handle1.remove()
outs_forward_hook = simplenet(input, y)
self.assertTrue(
np.array_equal(outs_forward_hook.numpy(),
outs_origin.numpy()))
np.testing.assert_array_equal(outs_forward_hook.numpy(),
outs_origin.numpy())
# test forward_pre_hook and forward_post_hook that don't have return values
def func_forward_hook(self):
......
......@@ -218,7 +218,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
my_test = MyTest()
my_test.set_dict(new_dict, use_structured_name=False)
for k, v in my_test.state_dict().items():
self.assertTrue(np.array_equal(v.numpy(), new_dict[v.name]))
np.testing.assert_array_equal(v.numpy(), new_dict[v.name])
temp_dir.cleanup()
......
......@@ -200,10 +200,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
self.assertTrue(
np.allclose(static_loss_value, dy_loss_value, rtol=1e-3))
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value,
dy_param_updated[key]))
np.testing.assert_array_equal(value, dy_param_updated[key])
if __name__ == '__main__':
......
......@@ -13,10 +13,11 @@
# limitations under the License.
import unittest
import warnings
import numpy as np
import paddle.fluid as fluid
import warnings
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
class TestImperativeNumpyBridge(unittest.TestCase):
......@@ -31,14 +32,14 @@ class TestImperativeNumpyBridge(unittest.TestCase):
w[-1].message)
# Temporarily disable zero_copy
# var = fluid.dygraph.to_variable(data_np, zero_copy=True)
# self.assertTrue(np.array_equal(var.numpy(), data_np))
# np.testing.assert_array_equal(var.numpy(), data_np)
# data_np[0][0] = 4
# self.assertEqual(data_np[0][0], 4)
# self.assertEqual(var[0][0].numpy()[0], 4)
# self.assertTrue(np.array_equal(var.numpy(), data_np))
# np.testing.assert_array_equal(var.numpy(), data_np)
var2 = fluid.dygraph.to_variable(data_np, zero_copy=False)
self.assertTrue(np.array_equal(var2.numpy(), data_np))
np.testing.assert_array_equal(var2.numpy(), data_np)
data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1)
if not _in_legacy_dygraph():
......
......@@ -573,7 +573,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
self.assertTrue(np.allclose(static_out, dy_out))
for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
np.testing.assert_array_equal(value, dy_param_init_value[key])
for key, value in six.iteritems(static_param_value):
self.assertTrue(np.allclose(value, dy_param_value[key], rtol=1e-05))
......@@ -582,7 +582,7 @@ class TestDygraphOCRAttention(unittest.TestCase):
self.assertTrue(np.allclose(static_out, eager_out))
for key, value in six.iteritems(static_param_init_value):
self.assertTrue(np.array_equal(value, eager_param_init_value[key]))
np.testing.assert_array_equal(value, eager_param_init_value[key])
for key, value in six.iteritems(static_param_value):
self.assertTrue(
......
......@@ -376,15 +376,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
static_param_updated[static_param_name_list[k -
3]] = out[k]
self.assertTrue(np.array_equal(static_loss_value, dy_loss_value))
self.assertTrue(
np.array_equal(static_last_cell_value, dy_last_cell_value))
self.assertTrue(
np.array_equal(static_last_hidden_value, dy_last_hidden_value))
np.testing.assert_array_equal(static_loss_value, dy_loss_value)
np.testing.assert_array_equal(static_last_cell_value,
dy_last_cell_value)
np.testing.assert_array_equal(static_last_hidden_value,
dy_last_hidden_value)
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key]))
np.testing.assert_array_equal(value, dy_param_updated[key])
if __name__ == '__main__':
......
......@@ -164,15 +164,15 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
static_param_updated[static_param_name_list[k -
3]] = out[k]
self.assertTrue(np.array_equal(static_loss_value, dy_loss_value))
self.assertTrue(
np.array_equal(static_last_cell_value, dy_last_cell_value))
self.assertTrue(
np.array_equal(static_last_hidden_value, dy_last_hidden_value))
np.testing.assert_array_equal(static_loss_value, dy_loss_value)
np.testing.assert_array_equal(static_last_cell_value,
dy_last_cell_value)
np.testing.assert_array_equal(static_last_hidden_value,
dy_last_hidden_value)
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key]))
np.testing.assert_array_equal(value, dy_param_updated[key])
def test_ptb_rnn_sort_gradient(self):
with _test_eager_guard():
......
......@@ -117,10 +117,10 @@ class TestRecurrentFeed(unittest.TestCase):
static_dout = out[2]
original_np1 = static_out_value
self.assertTrue(np.array_equal(static_sum_out, sum_out_value))
self.assertTrue(np.array_equal(static_sum_out, eager_sum_out_value))
self.assertTrue(np.array_equal(static_dout, dyout))
self.assertTrue(np.array_equal(static_dout, eager_dyout))
np.testing.assert_array_equal(static_sum_out, sum_out_value)
np.testing.assert_array_equal(static_sum_out, eager_sum_out_value)
np.testing.assert_array_equal(static_dout, dyout)
np.testing.assert_array_equal(static_dout, eager_dyout)
if __name__ == '__main__':
......
......@@ -387,8 +387,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -409,7 +409,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariable(self):
seed = 90
......@@ -492,8 +492,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -514,7 +514,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpy(self):
seed = 90
......@@ -601,8 +601,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -625,7 +625,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariableBeforeTrain(self):
seed = 90
......@@ -682,17 +682,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
state_dict = ptb_model.state_dict()
......@@ -700,7 +698,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testLoadAndSetVarBaseBeforeTrain(self):
seed = 90
......@@ -769,17 +767,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
# check parameter
......@@ -789,7 +785,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpyBeforeTrain(self):
seed = 90
......@@ -870,17 +866,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
# check parameter
......@@ -890,7 +884,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testOnlyLoadParams(self):
with fluid.dygraph.guard():
......
......@@ -401,8 +401,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -423,7 +423,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariable(self):
seed = 90
......@@ -508,8 +508,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -530,7 +530,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpy(self):
seed = 90
......@@ -619,8 +619,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if isinstance(v, (core.VarBase, core.eager.Tensor)):
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name]))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name])
else:
self.assertEqual(v, self.base_opti[k])
......@@ -643,7 +643,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetVariableBeforeTrain(self):
seed = 90
......@@ -702,17 +702,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
state_dict = ptb_model.state_dict()
......@@ -720,7 +718,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testLoadAndSetVarBaseBeforeTrain(self):
seed = 90
......@@ -790,17 +788,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "global_step":
self.assertTrue(
np.array_equal(v.numpy(), self.base_opti[v.name] + 1))
np.testing.assert_array_equal(v.numpy(),
self.base_opti[v.name] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
# check parameter
......@@ -810,7 +806,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testSetNumpyBeforeTrain(self):
seed = 90
......@@ -892,18 +888,15 @@ class TestDygraphPtbRnn(unittest.TestCase):
opti_dict = adam.state_dict()
for k, v in opti_dict.items():
if k == "LR_Scheduler":
self.assertTrue(
np.array_equal(v['last_epoch'],
self.base_opti[k]['last_epoch'] + 1))
np.testing.assert_array_equal(
v['last_epoch'], self.base_opti[k]['last_epoch'] + 1)
if k.find("beta1_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta1))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta1)
if k.find("beta2_pow_acc_0") > 0:
self.assertTrue(
np.array_equal(v.numpy(),
self.base_opti[v.name] * adam._beta2))
np.testing.assert_array_equal(
v.numpy(), self.base_opti[v.name] * adam._beta2)
# check parameter
......@@ -913,7 +906,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
new_t = v.numpy()
base_t = self.model_base[k]
self.assertTrue(np.array_equal(new_t, base_t))
np.testing.assert_array_equal(new_t, base_t)
def func_testOnlyLoadParams(self):
with fluid.dygraph.guard():
......
......@@ -203,13 +203,11 @@ class TestDygraphSimpleNet(unittest.TestCase):
static_param_updated[static_param_name_list[
k - 1]] = out[k]
self.assertTrue(np.array_equal(static_loss_value,
dy_loss_value))
np.testing.assert_array_equal(static_loss_value, dy_loss_value)
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value,
dy_param_updated[key]))
np.testing.assert_array_equal(value, dy_param_updated[key])
if __name__ == '__main__':
......
......@@ -313,11 +313,11 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_train_static()
# Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data))
np.testing.assert_array_equal(static_x_data, dy_x_data)
for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
np.testing.assert_array_equal(value, dy_param_init_value[key])
# np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
......@@ -341,10 +341,10 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_train_static()
# Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data))
np.testing.assert_array_equal(static_x_data, dy_x_data)
for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
np.testing.assert_array_equal(value, dy_param_init_value[key])
# np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
......@@ -368,7 +368,7 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase):
self.load_and_infer_static()
# Phase 3. compare
self.assertTrue(np.array_equal(static_x_data, dy_x_data))
np.testing.assert_array_equal(static_x_data, dy_x_data)
np.testing.assert_array_almost_equal(static_out, dy_out)
self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04))
......
......@@ -232,7 +232,7 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase):
static_param_init_value.keys())
for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
np.testing.assert_array_equal(value, dy_param_init_value[key])
self.assertTrue(np.allclose(static_out, dy_out))
......
......@@ -62,7 +62,7 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
dygraph_out = layer(in_x)
dygraph_out_numpy = dygraph_out.numpy()
static_out = traced_layer([in_x])[0]
self.assertTrue(np.array_equal(dygraph_out_numpy, static_out))
np.testing.assert_array_equal(dygraph_out_numpy, static_out)
loss = fluid.layers.reduce_mean(dygraph_out)
loss.backward()
......
......@@ -1133,19 +1133,19 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
static_param_updated[static_param_name_list[k -
4]] = out[k]
if _in_legacy_dygraph():
self.assertTrue(
np.array_equal(static_avg_cost_value, dy_avg_cost_value))
self.assertTrue(
np.array_equal(static_sum_cost_value, dy_sum_cost_value))
self.assertTrue(
np.array_equal(static_predict_value, dy_predict_value))
self.assertTrue(
np.array_equal(static_token_num_value, dy_token_num_value))
np.testing.assert_array_equal(static_avg_cost_value,
dy_avg_cost_value)
np.testing.assert_array_equal(static_sum_cost_value,
dy_sum_cost_value)
np.testing.assert_array_equal(static_predict_value,
dy_predict_value)
np.testing.assert_array_equal(static_token_num_value,
dy_token_num_value)
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
np.testing.assert_array_equal(value, dy_param_init[key])
for key, value in six.iteritems(static_param_updated):
self.assertTrue(np.array_equal(value, dy_param_updated[key]))
np.testing.assert_array_equal(value, dy_param_updated[key])
# compare eager result with imperative result
with guard():
......@@ -1164,7 +1164,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
self.assertTrue(np.allclose(dy_token_num_value, eager_token_num_value))
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, eager_param_init[key]))
np.testing.assert_array_equal(value, eager_param_init[key])
for key, value in six.iteritems(dy_param_updated):
self.assertTrue(np.allclose(value, eager_param_updated[key]))
......
......@@ -78,37 +78,34 @@ class TestDygraphTripleGradMatmul(TestCase):
new_a.backward()
out_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(out.numpy(), out_ref))
np.testing.assert_array_equal(out.numpy(), out_ref)
new_x_g_ref = np.ones([3, 3]) * 6.0
new_y_g_ref = np.ones([3, 3]) * 6.0
self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref))
self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref))
np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref)
np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref)
new_a_ref = np.ones([3, 3]) * 3.0
new_b_ref = np.ones([3, 3]) * 3.0
new_c_ref = np.ones([3, 3]) * 12.0
self.assertTrue(np.array_equal(new_a.numpy(), new_a_ref))
self.assertTrue(np.array_equal(new_b.numpy(), new_b_ref))
self.assertTrue(np.array_equal(new_c.numpy(), new_c_ref))
np.testing.assert_array_equal(new_a.numpy(), new_a_ref)
np.testing.assert_array_equal(new_b.numpy(), new_b_ref)
np.testing.assert_array_equal(new_c.numpy(), new_c_ref)
x_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref))
np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref)
y_grad_ref = np.ones([3, 3]) * 0.0
self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref))
np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref)
new_out_g_ref = np.ones([3, 3]) * 3.0
self.assertTrue(
np.array_equal(new_out_g.grad.numpy(), new_out_g_ref))
np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref)
new_x_g_g_ref = np.ones([3, 3]) * 0.0
new_y_g_g_ref = np.ones([3, 3]) * 3.0
self.assertTrue(
np.array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref))
self.assertTrue(
np.array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref))
np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)
np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)
class TestDygraphTripleGrad(TestCase):
......
......@@ -25,7 +25,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase):
def run_main(self, np_arr, place):
with guard(place):
var = to_variable(np_arr)
self.assertTrue(np.array_equal(np_arr, var.numpy()))
np.testing.assert_array_equal(np_arr, var.numpy())
def func_non_zero_gpu(self):
if not fluid.is_compiled_with_cuda():
......
......@@ -795,8 +795,8 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
dynamic_res = run_dynamic_graph()
static_res = run_static_graph()
self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))
np.testing.assert_array_equal(dynamic_res[0], static_res[0])
np.testing.assert_array_equal(dynamic_res[1], static_res[1])
def test_order(self):
with framework._test_eager_guard():
......@@ -819,7 +819,7 @@ class TestOrthogonalInitializer1(unittest.TestCase):
self.num_ops = 9
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
def func_orthogonal(self):
......@@ -878,7 +878,7 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
self.num_ops = 8
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10)))
......@@ -897,7 +897,7 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1):
self.num_ops = 8
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6))
......@@ -922,7 +922,7 @@ class TestOrthogonalInitializer4(unittest.TestCase):
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
a = a.reshape(6, -1)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
......@@ -973,7 +973,7 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
a = a.reshape(50, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36)))
......@@ -993,7 +993,7 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4):
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
np.testing.assert_array_equal(a, b)
a = a.reshape(36, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6))
......@@ -1014,8 +1014,8 @@ class TestDiracInitializer1(unittest.TestCase):
self.num_ops = 8 #fill_constant*2, reshape*2, assign_value*2, scatter, cast
def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static))
self.assertTrue(np.array_equal(conv_out, conv_in[:, 0:2, 1:9]))
np.testing.assert_array_equal(w_dygraph, w_static)
np.testing.assert_array_equal(conv_out, conv_in[:, 0:2, 1:9])
def func_dirac(self):
self.config()
......@@ -1079,11 +1079,11 @@ class TestDiracInitializer2(TestDiracInitializer1):
self.num_ops = 8
def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static))
self.assertTrue(
np.array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, 1:9]))
self.assertTrue(
np.array_equal(conv_out[:, 4:8, :, :], np.zeros([8, 4, 8, 8])))
np.testing.assert_array_equal(w_dygraph, w_static)
np.testing.assert_array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9,
1:9])
np.testing.assert_array_equal(conv_out[:, 4:8, :, :],
np.zeros([8, 4, 8, 8]))
# initialize Conv3D weight
......@@ -1101,13 +1101,11 @@ class TestDiracInitializer3(TestDiracInitializer1):
self.num_ops = 7
def check_result(self, w_dygraph, w_static, conv_in, conv_out):
self.assertTrue(np.array_equal(w_dygraph, w_static))
self.assertTrue(
np.array_equal(conv_out[:, 0:5, :, :, :], conv_in[:, :, 1:9, 1:9,
1:9]))
self.assertTrue(
np.array_equal(conv_out[:, 5:10, :, :, :], conv_in[:, :, 1:9, 1:9,
1:9]))
np.testing.assert_array_equal(w_dygraph, w_static)
np.testing.assert_array_equal(conv_out[:, 0:5, :, :, :],
conv_in[:, :, 1:9, 1:9, 1:9])
np.testing.assert_array_equal(conv_out[:, 5:10, :, :, :],
conv_in[:, :, 1:9, 1:9, 1:9])
def test_error(self):
self.config()
......
......@@ -142,7 +142,7 @@ class TestDygraphInplace(unittest.TestCase):
self.assertTrue(id(var) == id(inplace_var))
inplace_var[0] = 2.
self.assertTrue(np.array_equal(var.numpy(), inplace_var.numpy()))
np.testing.assert_array_equal(var.numpy(), inplace_var.numpy())
def test_inplace_api(self):
with _test_eager_guard():
......@@ -276,7 +276,7 @@ class TestDygraphInplace(unittest.TestCase):
loss.backward()
grad_var_a = var_a.grad.numpy()
self.assertTrue(np.array_equal(grad_var_a_inplace, grad_var_a))
np.testing.assert_array_equal(grad_var_a_inplace, grad_var_a)
def test_backward_success_2(self):
with _test_eager_guard():
......@@ -506,7 +506,7 @@ class TestLossIsInplaceVar(unittest.TestCase):
loss.backward()
grad_var_a = var_a.grad.numpy()
self.assertTrue(np.array_equal(inplace_grad_var_a, grad_var_a))
np.testing.assert_array_equal(inplace_grad_var_a, grad_var_a)
def test_loss_is_inplace_var(self):
with _test_eager_guard():
......
......@@ -108,7 +108,7 @@ class TestInplaceAddto(unittest.TestCase):
res1, w1 = run_program(True)
res2, w2 = run_program(False)
self.assertTrue(np.array_equal(res1, res2))
np.testing.assert_array_equal(res1, res2)
def test_nchw(self):
self.check_result()
......
......@@ -56,7 +56,7 @@ class TestStaticAutoGeneratedAPI(unittest.TestCase):
feed={"x": self.np_x},
fetch_list=[x, out])
self.assertTrue(np.array_equal(fetch_x, self.np_x))
np.testing.assert_array_equal(fetch_x, self.np_x)
self.assertTrue(
self.np_compare(fetch_out, self.executed_numpy_api(self.np_x)))
......
......@@ -388,9 +388,9 @@ class TestJitSaveLoad(unittest.TestCase):
# inference & compare
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x).numpy(),
infer_layer(x).numpy()))
np.testing.assert_array_equal(
train_layer(x).numpy(),
infer_layer(x).numpy())
def load_and_finetune(self, train_layer, load_train_layer):
train_layer.train()
......@@ -398,8 +398,8 @@ class TestJitSaveLoad(unittest.TestCase):
# train & compare
img0, _, train_loss = train(train_layer)
img1, _, load_train_loss = train(load_train_layer)
self.assertTrue(
np.array_equal(train_loss.numpy(), load_train_loss.numpy()))
np.testing.assert_array_equal(train_loss.numpy(),
load_train_loss.numpy())
def load_dygraph_state_dict(self, train_layer):
train_layer.eval()
......@@ -414,9 +414,9 @@ class TestJitSaveLoad(unittest.TestCase):
# inference & compare
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x).numpy(),
new_layer(x).numpy()))
np.testing.assert_array_equal(
train_layer(x).numpy(),
new_layer(x).numpy())
def test_load_dygraph_no_path(self):
model_path = os.path.join(self.temp_dir.name,
......@@ -673,9 +673,9 @@ class TestJitSaveLoadConfig(unittest.TestCase):
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x)[0].numpy(),
infer_layer(x).numpy()))
np.testing.assert_array_equal(
train_layer(x)[0].numpy(),
infer_layer(x).numpy())
def test_save_no_support_config_error(self):
layer = LinearNet(784, 1)
......@@ -778,9 +778,9 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
self.assertTrue(
np.array_equal(train_layer(x)[0].numpy(),
infer_layer(x).numpy()))
np.testing.assert_array_equal(
train_layer(x)[0].numpy(),
infer_layer(x).numpy())
def test_load_var_not_in_extra_var_info(self):
self.train_and_save()
......@@ -831,10 +831,12 @@ class TestJitSaveMultiCases(unittest.TestCase):
else:
pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy()
self.assertTrue(
np.array_equal(pred, loaded_pred),
msg="Result diff when load and inference:\nlayer result:\n{}\n" \
"loaded layer result:\n{}".format(pred, loaded_pred))
np.testing.assert_array_equal(
pred,
loaded_pred,
err_msg=
'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'
.format(pred, loaded_pred))
def test_no_prune_to_static_after_train(self):
layer = LinearNet(784, 1)
......@@ -1056,7 +1058,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x)
self.assertTrue(np.array_equal(out, load_out))
np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadNoParamLayer(unittest.TestCase):
......@@ -1079,7 +1081,7 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x, y)
self.assertTrue(np.array_equal(out, load_out))
np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadMultiMethods(unittest.TestCase):
......@@ -1506,7 +1508,7 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase):
load_func = paddle.jit.load(path)
load_result = load_func(inps)
self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy()))
np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
......@@ -1546,8 +1548,8 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
load_result = load_func(inps)
self.assertTrue(
np.array_equal(origin_result.numpy(), load_result.numpy()))
np.testing.assert_array_equal(origin_result.numpy(),
load_result.numpy())
class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
......@@ -1586,7 +1588,7 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
load_func = paddle.jit.load(path)
load_result = load_func(inps)
self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy()))
np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadDataParallel(unittest.TestCase):
......@@ -1605,10 +1607,12 @@ class TestJitSaveLoadDataParallel(unittest.TestCase):
x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy()
self.assertTrue(
np.array_equal(pred, loaded_pred),
msg="Result diff when load and inference:\nlayer result:\n{}\n" \
"loaded layer result:\n{}".format(pred, loaded_pred))
np.testing.assert_array_equal(
pred,
loaded_pred,
err_msg=
'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'
.format(pred, loaded_pred))
def test_jit_save_data_parallel_with_inputspec(self):
layer = LinearNetNotDeclarative(784, 1)
......
......@@ -237,8 +237,8 @@ class TestLambOpMultiPrecision(unittest.TestCase):
if multi_precision:
params[0] = np.array(params[0])
params[1] = np.array(params[1])
self.assertTrue(
np.array_equal(params[0], params[1].astype(np.float16)))
np.testing.assert_array_equal(params[0],
params[1].astype(np.float16))
return params[0].astype(np.float32)
else:
self.assertTrue(params[0] is not None)
......@@ -259,9 +259,8 @@ class TestLambOpMultiPrecision(unittest.TestCase):
fetch_list=[weight, bias])
weight_np = weight_np.astype('float32')
bias_np = bias_np.astype('float32')
self.assertTrue(np.array_equal(weight_np,
get_parameter(weight)))
self.assertTrue(np.array_equal(bias_np, get_parameter(bias)))
np.testing.assert_array_equal(weight_np, get_parameter(weight))
np.testing.assert_array_equal(bias_np, get_parameter(bias))
return weight_np, bias_np
@switch_to_static_graph
......
......@@ -381,7 +381,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase):
x_np, weight_np, bias_np, 'float32')
def assert_equal(x, y):
self.assertTrue(np.array_equal(x, y))
np.testing.assert_array_equal(x, y)
assert_equal(y_np_1, y_np_2)
assert_equal(x_g_np_1, x_g_np_2)
......
......@@ -62,7 +62,7 @@ class TestLoadOp(unittest.TestCase):
exe = fluid.Executor(fluid.CPUPlace())
exe.run(start_prog)
ret = exe.run(main_prog, fetch_list=[var.name])
self.assertTrue(np.array_equal(self.ones, ret[0]))
np.testing.assert_array_equal(self.ones, ret[0])
if __name__ == "__main__":
......
......@@ -64,7 +64,7 @@ class TestLoadOpXpu(unittest.TestCase):
exe = fluid.Executor(fluid.XPUPlace(0))
exe.run(start_prog)
ret = exe.run(main_prog, fetch_list=[var.name])
self.assertTrue(np.array_equal(self.ones, ret[0]))
np.testing.assert_array_equal(self.ones, ret[0])
if __name__ == "__main__":
......
......@@ -123,7 +123,7 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
def check_load_state_dict(self, orig_dict, load_dict):
for var_name, value in six.iteritems(orig_dict):
self.assertTrue(np.array_equal(value, load_dict[var_name]))
np.testing.assert_array_equal(value, load_dict[var_name])
def test_load_default(self):
self.save_dirname = os.path.join(
......