Unverified commit 99fb293c, authored by ronnywang, committed by GitHub

[MLU] use np.testing.assert_allclose instead of assertTrue(np.allclose(...)) (#44801)

Parent 463fc15e
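Why this change helps, in brief: np.testing.assert_allclose prints a full mismatch report on failure (mismatch count, max absolute and relative difference, and both arrays), whereas self.assertTrue(np.allclose(...)) collapses the comparison to a bare boolean, so unittest can only report "False is not true". The defaults also differ: np.allclose uses rtol=1e-5 and atol=1e-8, while np.testing.assert_allclose uses the stricter rtol=1e-7 and atol=0, which is why many call sites below gain an explicit rtol or atol. A minimal standalone sketch of the new style (illustrative only, not part of this commit):

import numpy as np

actual = np.array([1.0, 2.0, 3.001])
desired = np.array([1.0, 2.0, 3.0])

# Old style: on failure unittest reports only "False is not true".
# self.assertTrue(np.allclose(actual, desired, rtol=1e-3))

# New style: a failure names the mismatched elements and both arrays;
# err_msg replaces the old msg argument of assertTrue.
np.testing.assert_allclose(actual, desired, rtol=1e-3,
                           err_msg="mlu vs cpu prediction")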
......@@ -303,8 +303,8 @@ class TestNet(unittest.TestCase):
def test_mlu(self):
mlu_pred, mlu_loss = self._test(True)
cpu_pred, cpu_loss = self._test(False)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred, rtol=1e-3))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss, rtol=1e-3))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
+np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
if __name__ == '__main__':
......
......@@ -249,8 +249,8 @@ class TestNet(unittest.TestCase):
def test_mlu(self):
mlu_pred, mlu_loss = self._test(True)
cpu_pred, cpu_loss = self._test(False)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred, rtol=1e-3))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss, rtol=1e-3))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
+np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
if __name__ == '__main__':
......
......@@ -219,7 +219,10 @@ class TestBatchNormOpInference(unittest.TestCase):
self.init_kernel_type()
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+np.testing.assert_allclose(np.array(tensor),
+                           np_array,
+                           atol=atol,
+                           err_msg=msg)
def check_with_place(self, place, data_layout, dtype, shape):
epsilon = 0.00001
......@@ -672,7 +675,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
x = np.random.randn(*shape).astype("float32")
y1 = compute(x, False, False)
y2 = compute(x, True, True)
-self.assertTrue(np.allclose(y1, y2))
+np.testing.assert_allclose(y1, y2)
def test_static(self):
places = [fluid.CPUPlace()]
......@@ -697,7 +700,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase):
x = np.random.randn(*shape).astype("float32")
y1 = compute(x, False, False)
y2 = compute(x, True, True)
-self.assertTrue(np.allclose(y1, y2))
+np.testing.assert_allclose(y1, y2)
class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase):
......
......@@ -137,8 +137,8 @@ class TestBatchNorm(unittest.TestCase):
y2 = compute_v2(x)
y3 = compute_v3(x, False, False)
y4 = compute_v4(x)
-self.assertTrue(np.allclose(y1, y2))
-self.assertTrue(np.allclose(y3, y4))
+np.testing.assert_allclose(y1, y2)
+np.testing.assert_allclose(y3, y4)
def test_static(self):
places = [fluid.CPUPlace()]
......@@ -172,7 +172,7 @@ class TestBatchNorm(unittest.TestCase):
x = np.random.randn(*shape).astype("float32")
y1 = compute_v1(x, False, False)
y2 = compute_v2(x)
-self.assertTrue(np.allclose(y1, y2))
+np.testing.assert_allclose(y1, y2)
class TestBatchNormChannelLast(unittest.TestCase):
......
......@@ -58,7 +58,7 @@ def test_static_layer(place,
"weight": weight_np
},
fetch_list=[res])
-return static_result
+return static_result[0]
def test_static_functional(place,
......@@ -98,7 +98,7 @@ def test_static_functional(place,
"weight": weight_np
},
fetch_list=[res])
-return static_result
+return static_result[0]
def test_dygraph_layer(place,
......@@ -174,16 +174,18 @@ class TestBCELoss(unittest.TestCase):
dy_result = test_dygraph_layer(place, input_np, label_np,
reduction)
expected = calc_bceloss(input_np, label_np, reduction)
-self.assertTrue(np.allclose(static_result, expected))
-self.assertTrue(np.allclose(static_result, dy_result))
-self.assertTrue(np.allclose(dy_result, expected))
+np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+np.testing.assert_allclose(static_result, dy_result)
+np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(
place, input_np, label_np, reduction)
dy_functional = test_dygraph_functional(place, input_np,
label_np, reduction)
-self.assertTrue(np.allclose(static_functional, expected))
-self.assertTrue(np.allclose(static_functional, dy_functional))
-self.assertTrue(np.allclose(dy_functional, expected))
+np.testing.assert_allclose(static_functional,
+                           expected,
+                           rtol=1e-6)
+np.testing.assert_allclose(static_functional, dy_functional)
+np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_weight(self):
input_np = np.random.uniform(0.1, 0.8,
......@@ -207,9 +209,9 @@ class TestBCELoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
-self.assertTrue(np.allclose(static_result, expected))
-self.assertTrue(np.allclose(static_result, dy_result))
-self.assertTrue(np.allclose(dy_result, expected))
+np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+np.testing.assert_allclose(static_result, dy_result)
+np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(place,
input_np,
label_np,
......@@ -220,9 +222,9 @@ class TestBCELoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
-self.assertTrue(np.allclose(static_functional, expected))
-self.assertTrue(np.allclose(static_functional, dy_functional))
-self.assertTrue(np.allclose(dy_functional, expected))
+np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
+np.testing.assert_allclose(static_functional, dy_functional)
+np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_error(self):
paddle.disable_static()
......
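A note on the return static_result[0] changes (in the file above and the next one): Executor.run returns a list of fetched arrays, and np.allclose silently broadcast the one-element list against the expected array, while np.testing.assert_allclose compares shapes strictly. A NumPy-only sketch of the difference (the fetched value here is a stand-in, not real Paddle output):

import numpy as np

expected = np.ones((2, 3))
fetched = [np.ones((2, 3))]  # fetch_list results come back as a list

print(np.allclose(fetched, expected))  # True: the (1, 2, 3) view broadcasts
try:
    np.testing.assert_allclose(fetched, expected)
except AssertionError:
    print("strict shape check rejects (1, 2, 3) vs (2, 3)")
np.testing.assert_allclose(fetched[0], expected)  # compare array to array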
......@@ -61,7 +61,7 @@ def test_static(place,
res = call_bce_layer(logit, label, weight, reduction, pos_weight)
exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
-return static_result
+return static_result[0]
paddle.enable_static()
......@@ -86,9 +86,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction=reduction)
expected = calc_bce_with_logits_loss(logit_np, label_np,
reduction)
-self.assertTrue(np.allclose(static_result, expected))
-self.assertTrue(np.allclose(static_result, dy_result))
-self.assertTrue(np.allclose(dy_result, expected))
+np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+np.testing.assert_allclose(static_result, dy_result)
+np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -100,9 +100,11 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction=reduction,
functional=True)
-self.assertTrue(np.allclose(static_functional, expected))
-self.assertTrue(np.allclose(static_functional, dy_functional))
-self.assertTrue(np.allclose(dy_functional, expected))
+np.testing.assert_allclose(static_functional,
+                           expected,
+                           rtol=1e-6)
+np.testing.assert_allclose(static_functional, dy_functional)
+np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCEWithLogitsLoss_weight(self):
logit_np = np.random.uniform(0.1, 0.8,
......@@ -126,9 +128,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
-self.assertTrue(np.allclose(static_result, expected))
-self.assertTrue(np.allclose(static_result, dy_result))
-self.assertTrue(np.allclose(dy_result, expected))
+np.testing.assert_allclose(static_result, expected, rtol=1e-6)
+np.testing.assert_allclose(static_result, dy_result)
+np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -141,9 +143,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
weight_np=weight_np,
reduction=reduction,
functional=True)
-self.assertTrue(np.allclose(static_functional, expected))
-self.assertTrue(np.allclose(static_functional, dy_functional))
-self.assertTrue(np.allclose(dy_functional, expected))
+np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
+np.testing.assert_allclose(static_functional, dy_functional)
+np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCEWithLogitsLoss_pos_weight(self):
logit_np = np.random.uniform(0.1, 0.8,
......@@ -160,9 +162,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction, pos_weight_np)
expected = calc_bce_with_logits_loss(logit_np, label_np, reduction,
weight_np, pos_weight_np)
-self.assertTrue(np.allclose(static_result, expected))
-self.assertTrue(np.allclose(static_result, dy_result))
-self.assertTrue(np.allclose(dy_result, expected))
+np.testing.assert_allclose(static_result, expected)
+np.testing.assert_allclose(static_result, dy_result)
+np.testing.assert_allclose(dy_result, expected)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -177,9 +179,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction,
pos_weight_np,
functional=True)
-self.assertTrue(np.allclose(static_functional, expected))
-self.assertTrue(np.allclose(static_functional, dy_functional))
-self.assertTrue(np.allclose(dy_functional, expected))
+np.testing.assert_allclose(static_functional, expected)
+np.testing.assert_allclose(static_functional, dy_functional)
+np.testing.assert_allclose(dy_functional, expected)
def test_BCEWithLogitsLoss_error(self):
paddle.disable_static()
......
......@@ -563,7 +563,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase):
out_w=12,
align_corners=True)
for res in results:
-self.assertTrue(np.allclose(res, expect_res))
+np.testing.assert_allclose(res, expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy(unittest.TestCase):
......@@ -585,7 +585,7 @@ class TestBilinearInterpOpAPI_dy(unittest.TestCase):
size=[12, 12],
mode="bilinear",
align_corners=False)
-self.assertTrue(np.allclose(out.numpy(), expect_res))
+np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy2(unittest.TestCase):
......@@ -609,7 +609,7 @@ class TestBilinearInterpOpAPI_dy2(unittest.TestCase):
size=size,
mode="bilinear",
align_corners=False)
-self.assertTrue(np.allclose(out.numpy(), expect_res))
+np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy3(unittest.TestCase):
......@@ -633,7 +633,7 @@ class TestBilinearInterpOpAPI_dy3(unittest.TestCase):
size=[size, size],
mode="bilinear",
align_corners=False)
-self.assertTrue(np.allclose(out.numpy(), expect_res))
+np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy4(unittest.TestCase):
......@@ -658,7 +658,7 @@ class TestBilinearInterpOpAPI_dy4(unittest.TestCase):
mode="bilinear",
align_corners=False)
-self.assertTrue(np.allclose(out.numpy(), expect_res))
+np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
if __name__ == "__main__":
......
......@@ -210,22 +210,26 @@ class TestDistBase(unittest.TestCase):
input2 = np.random.random((10, 1000)).astype(np_data_type)
if col_type == "broadcast":
need_result = input2
-self.assertTrue(np.allclose(tr0_out, need_result))
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr0_out, need_result)
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "allreduce":
need_result = input1 + input2
-self.assertTrue(
-    np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-self.assertTrue(
-    np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+np.testing.assert_allclose(tr0_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
+np.testing.assert_allclose(tr1_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
elif col_type == "reduce":
need_result = input1 + input2
-self.assertTrue(np.allclose(tr0_out, need_result))
+np.testing.assert_allclose(tr0_out, need_result)
elif col_type == "allgather":
need_result = np.vstack((input1, input2))
tr_out0 = np.vstack((tr0_out[0], tr0_out[1]))
tr_out1 = np.vstack((tr1_out[0], tr1_out[1]))
-self.assertTrue(np.allclose(tr_out0, need_result))
-self.assertTrue(np.allclose(tr_out1, need_result))
+np.testing.assert_allclose(tr_out0, need_result)
+np.testing.assert_allclose(tr_out1, need_result)
else:
pass
......@@ -259,47 +259,63 @@ class TestDistBase(unittest.TestCase):
input2 = np.random.random((10, 1000)).astype(np_data_type)
if col_type == "broadcast":
need_result = input2
-self.assertTrue(np.allclose(tr0_out, need_result))
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr0_out, need_result)
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "allreduce_sum":
need_result = input1 + input2
-self.assertTrue(
-    np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-self.assertTrue(
-    np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+np.testing.assert_allclose(tr0_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
+np.testing.assert_allclose(tr1_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
elif col_type == "allreduce_prod":
need_result = input1 * input2
-self.assertTrue(
-    np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-self.assertTrue(
-    np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+np.testing.assert_allclose(tr0_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
+np.testing.assert_allclose(tr1_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
elif col_type == "allreduce_max":
need_result = np.maximum(input1, input2)
-self.assertTrue(
-    np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-self.assertTrue(
-    np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+np.testing.assert_allclose(tr0_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
+np.testing.assert_allclose(tr1_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
elif col_type == "allreduce_min":
need_result = np.minimum(input1, input2)
-self.assertTrue(
-    np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05))
-self.assertTrue(
-    np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05))
+np.testing.assert_allclose(tr0_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
+np.testing.assert_allclose(tr1_out,
+                           need_result,
+                           rtol=1e-05,
+                           atol=1e-05)
elif col_type == "reduce_sum":
need_result = input1 + input2
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "reduce_prod":
need_result = input1 * input2
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "reduce_max":
need_result = np.maximum(input1, input2)
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "reduce_min":
need_result = np.minimum(input1, input2)
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr1_out, need_result)
elif col_type == "allgather":
need_result = np.vstack((input1, input2))
-self.assertTrue(np.allclose(tr0_out, need_result))
-self.assertTrue(np.allclose(tr1_out, need_result))
+np.testing.assert_allclose(tr0_out, need_result)
+np.testing.assert_allclose(tr1_out, need_result)
else:
pass
......@@ -268,11 +268,11 @@ class TestDropoutAPI(unittest.TestCase):
fetches = exe.run(fluid.default_main_program(),
feed={"input": in_np},
fetch_list=[res])
-self.assertTrue(np.allclose(fetches[0], res_np))
+np.testing.assert_allclose(fetches[0], res_np)
fetches2 = exe.run(fluid.default_main_program(),
feed={"input": in_np},
fetch_list=[res6])
-self.assertTrue(np.allclose(fetches2[0], res_np2))
+np.testing.assert_allclose(fetches2[0], res_np2)
def test_static(self):
for place in self.places:
......
......@@ -361,8 +361,8 @@ class TestElementwiseMaxNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss)
if __name__ == '__main__':
......
......@@ -222,8 +222,8 @@ class TestElementwiseMinOpNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss)
if __name__ == '__main__':
......
......@@ -302,7 +302,7 @@ class TestExpandV2DygraphAPI(unittest.TestCase):
expand_1 = paddle.expand(a, shape=[2, 5])
np_array = np.array([2, 5])
expand_2 = paddle.expand(a, shape=np_array)
-self.assertTrue(np.array_equal(expand_1.numpy(), expand_2.numpy()))
+np.testing.assert_allclose(expand_1.numpy(), expand_2.numpy())
if __name__ == "__main__":
......
......@@ -122,7 +122,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase):
result_array = np.array(out.get_tensor())
full_array = np.full((123, 92), 3.8, 'float32')
-self.assertTrue(np.array_equal(result_array, full_array))
+np.testing.assert_allclose(result_array, full_array)
def test_fill_constant_with_selected_rows(self):
places = [core.CPUPlace()]
......
......@@ -292,7 +292,7 @@ class TestGatherNdAPI2(unittest.TestCase):
output = paddle.fluid.layers.gather(input, index)
output_np = output.numpy()
expected_output = np.array([3, 4])
-self.assertTrue(np.allclose(output_np, expected_output))
+np.testing.assert_allclose(output_np[0], expected_output, rtol=1e-6)
paddle.enable_static()
......
......@@ -101,7 +101,7 @@ class API_TestDygraphGather(unittest.TestCase):
output = paddle.fluid.layers.gather(input, index)
output_np = output.numpy()
expected_output = np.array([[3, 4], [5, 6]]).astype('int32')
-self.assertTrue(np.allclose(output_np, expected_output))
+np.testing.assert_allclose(output_np, expected_output)
paddle.enable_static()
def test_out12(self):
......@@ -113,7 +113,7 @@ class API_TestDygraphGather(unittest.TestCase):
output = paddle.gather(x, index, axis=0)
output_np = output.numpy()
expected_output = gather_numpy(input_1, index_1, axis=0)
-self.assertTrue(np.allclose(output_np, expected_output))
+np.testing.assert_allclose(output_np, expected_output)
paddle.enable_static()
def test_zero_index(self):
......
......@@ -65,8 +65,12 @@ class TestGaussianRandomOp(OpTest):
hist2, _ = np.histogram(data, range=(-3, 5))
hist2 = hist2.astype("float32")
hist2 /= float(outs[0].size)
-self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                "hist: " + str(hist) + " hist2: " + str(hist2))
+np.testing.assert_allclose(hist,
+                           hist2,
+                           rtol=0,
+                           atol=0.01,
+                           err_msg="hist: " + str(hist) + " hist2: " +
+                           str(hist2))
class TestMeanStdAreInt(TestGaussianRandomOp):
......
......@@ -150,8 +150,8 @@ class TestGeluNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred, atol=1e-3))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss, atol=1e-3))
+np.testing.assert_allclose(mlu_pred, cpu_pred, atol=1e-3)
+np.testing.assert_allclose(mlu_loss, cpu_loss, atol=1e-3)
if __name__ == '__main__':
......
......@@ -147,7 +147,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = ref_hardsigmoid(self.x_np)
for r in res:
-self.assertTrue(np.allclose(out_ref, r))
+np.testing.assert_allclose(out_ref, r, rtol=1e-6)
def test_dygraph_api(self):
paddle.disable_static(self.place)
......@@ -157,22 +157,23 @@ class TestHardsigmoidAPI(unittest.TestCase):
out2 = m(x)
out_ref = ref_hardsigmoid(self.x_np)
for r in [out1, out2]:
-self.assertTrue(np.allclose(out_ref, r.numpy()))
+np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-6)
paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = fluid.layers.hard_sigmoid(x)
exe = fluid.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
-self.assertTrue(np.allclose(out_ref, res[0]))
+np.testing.assert_allclose(out_ref, res[0])
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.fluid.layers.hard_sigmoid(x)
-self.assertTrue(np.allclose(out_ref, out.numpy()))
+np.testing.assert_allclose(out_ref, out.numpy())
paddle.enable_static()
def test_errors(self):
......
......@@ -45,7 +45,11 @@ class TestLayerNormOp(unittest.TestCase):
self.__class__.use_mlu = True
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+np.testing.assert_allclose(np.array(tensor),
+                           np_array,
+                           rtol=1e-5,
+                           atol=atol,
+                           err_msg=msg)
def check_forward_backward(self,
shape,
......@@ -152,11 +156,11 @@ class TestLayerNormOp(unittest.TestCase):
1e-3)
self.__assert_close(x_grad, out[3], "x_grad")
if has_scale:
-self.__assert_close(scale_grad,
+self.__assert_close(scale_grad.reshape(-1),
out[fetch_list.index('scale@GRAD')],
"scale_grad", 1e-3)
if has_bias:
-self.__assert_close(bias_grad,
+self.__assert_close(bias_grad.reshape(-1),
out[fetch_list.index('bias@GRAD')],
"bias_grad")
......@@ -287,7 +291,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase):
x_np, weight_np, bias_np, 'float32')
def assert_equal(x, y):
-self.assertTrue(np.array_equal(x, y))
+np.testing.assert_allclose(x, y)
assert_equal(y_np_1, y_np_2)
assert_equal(x_g_np_1, x_g_np_2)
......
......@@ -145,8 +145,8 @@ class TestLeakyReluNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss)
if __name__ == '__main__':
......
......@@ -123,7 +123,7 @@ class TestLog2(TestActivation):
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log2(input_x)
-self.assertTrue(np.allclose(res1, expected_res))
+np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6)
# dygraph
with fluid.dygraph.guard():
......@@ -134,7 +134,7 @@ class TestLog2(TestActivation):
z_expected = np.array(np.log2(np_x))
np.savetxt("np_z.txt", np_z.flatten(), fmt="%.4f")
np.savetxt("z_expected.txt", z_expected.flatten(), fmt="%.4f")
-self.assertTrue(np.allclose(np_z, z_expected, atol=1e-6))
+np.testing.assert_allclose(np_z, z_expected, atol=1e-6)
class TestLog10(TestActivation):
......@@ -173,7 +173,7 @@ class TestLog10(TestActivation):
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log10(input_x)
-self.assertTrue(np.allclose(res1, expected_res))
+np.testing.assert_allclose(res1[0], expected_res, rtol=1e-6)
# dygraph
with fluid.dygraph.guard():
......@@ -182,7 +182,7 @@ class TestLog10(TestActivation):
z = paddle.log10(data_x)
np_z = z.numpy()
z_expected = np.array(np.log10(np_x))
-self.assertTrue(np.allclose(np_z, z_expected))
+np.testing.assert_allclose(np_z, z_expected, rtol=1e-4)
class TestLogHalf(TestLog):
......
......@@ -136,19 +136,20 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x)
logsoftmax = paddle.nn.LogSoftmax(axis)
+paddle.enable_static()
# test static api
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data(name='x', shape=self.x_shape)
y = logsoftmax(x)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
-self.assertTrue(np.allclose(out[0], ref_out))
+np.testing.assert_allclose(out[0], ref_out, rtol=1e-6)
# test dygrapg api
paddle.disable_static()
x = paddle.to_tensor(self.x)
y = logsoftmax(x)
-self.assertTrue(np.allclose(y.numpy(), ref_out))
+np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-6)
paddle.enable_static()
def test_check_api(self):
......@@ -177,12 +178,12 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
y = F.log_softmax(x, axis, dtype)
exe = paddle.static.Executor(self.place)
out = exe.run(feed={'x': self.x}, fetch_list=[y])
-self.assertTrue(np.allclose(out[0], ref_out))
+np.testing.assert_allclose(out[0], ref_out, rtol=1e-6)
paddle.disable_static()
x = paddle.to_tensor(self.x)
y = F.log_softmax(x, axis, dtype)
-self.assertTrue(np.allclose(y.numpy(), ref_out), True)
+np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-6)
paddle.enable_static()
def test_check_api(self):
......@@ -191,12 +192,14 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
self.check_api(-1, 'float32')
def test_errors(self):
+paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
self.assertRaises(TypeError, F.log_softmax, x)
x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
+paddle.disable_static()
if __name__ == "__main__":
......
......@@ -316,7 +316,7 @@ class TestMergedMomentum(unittest.TestCase):
outs2 = run_op(False)
self.assertEqual(len(outs1), len(outs2))
for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+np.testing.assert_allclose(out1, out2, atol=1e-7)
def test_main(self):
self.check_with_place(self.place, multi_precision=False)
......@@ -370,13 +370,13 @@ class TestMergedMomentum2(unittest.TestCase):
outs2 = run_op(use_nesterov=True, use_merged=False)
self.assertEqual(len(outs1), len(outs2))
for i, (out1, out2) in enumerate(zip(outs1, outs2)):
-self.assertTrue(np.allclose(out1, out2, atol=1e-7))
+np.testing.assert_allclose(out1, out2, atol=1e-7)
outs3 = run_op(use_nesterov=False, use_merged=True)
outs4 = run_op(use_nesterov=False, use_merged=False)
self.assertEqual(len(outs3), len(outs4))
for j, (out3, out4) in enumerate(zip(outs3, outs4)):
-self.assertTrue(np.allclose(out3, out4, atol=1e-7))
+np.testing.assert_allclose(out3, out4, atol=1e-7)
def test_main(self):
self.check_with_place(self.place, multi_precision=False)
......
......@@ -594,10 +594,10 @@ class TestNearestAPI(unittest.TestCase):
out_h=12,
out_w=12,
align_corners=False)
-self.assertTrue(
-    np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1))))
+np.testing.assert_allclose(results[0],
+                           np.transpose(expect_res, (0, 2, 3, 1)))
for i in range(len(results) - 1):
-self.assertTrue(np.allclose(results[i + 1], expect_res))
+np.testing.assert_allclose(results[i + 1], expect_res)
class TestNearestInterpException(unittest.TestCase):
......
......@@ -993,7 +993,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
paddings=[0, 0],
pool_type='max',
data_format='NHWC')
-self.assertTrue(np.allclose(out1.numpy(), out2))
+np.testing.assert_allclose(out1.numpy(), out2)
def test_lower_case(self):
with fluid.dygraph.guard():
......@@ -1010,7 +1010,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
paddings=[0, 0],
pool_type='max',
data_format='NHWC')
-self.assertTrue(np.allclose(out1.numpy(), out2))
+np.testing.assert_allclose(out1.numpy(), out2)
def test_upper_case(self):
with fluid.dygraph.guard():
......@@ -1027,7 +1027,7 @@ class TestDygraphPool2DAPI(unittest.TestCase):
paddings=[0, 0],
pool_type='max',
data_format='NHWC')
-self.assertTrue(np.allclose(out1.numpy(), out2))
+np.testing.assert_allclose(out1.numpy(), out2)
if __name__ == '__main__':
......
......@@ -160,61 +160,61 @@ class TestRandomValue(unittest.TestCase):
expect = [
24562, 8409, 9379, 10328, 20503, 18059, 9681, 21883, 11783, 27413
]
-self.assertTrue(np.array_equal(x[0:10], expect))
+np.testing.assert_allclose(x[0:10], expect)
expect = [
29477, 27100, 9643, 16637, 8605, 16892, 27767, 2724, 1612, 13096
]
-self.assertTrue(np.array_equal(x[10000:10010], expect))
+np.testing.assert_allclose(x[10000:10010], expect)
expect = [
298, 4104, 16479, 22714, 28684, 7510, 14667, 9950, 15940, 28343
]
-self.assertTrue(np.array_equal(x[20000:20010], expect))
+np.testing.assert_allclose(x[20000:20010], expect)
x = paddle.randperm(30000, dtype='int64').numpy()
expect = [
6587, 1909, 5525, 23001, 6488, 14981, 14355, 3083, 29561, 8171
]
-self.assertTrue(np.array_equal(x[0:10], expect))
+np.testing.assert_allclose(x[0:10], expect)
expect = [
23460, 12394, 22501, 5427, 20185, 9100, 5127, 1651, 25806, 4818
]
-self.assertTrue(np.array_equal(x[10000:10010], expect))
+np.testing.assert_allclose(x[10000:10010], expect)
expect = [5829, 4508, 16193, 24836, 8526, 242, 9984, 9243, 1977, 11839]
-self.assertTrue(np.array_equal(x[20000:20010], expect))
+np.testing.assert_allclose(x[20000:20010], expect)
x = paddle.randperm(30000, dtype='float32').numpy()
expect = [
5154., 10537., 14362., 29843., 27185., 28399., 27561., 4144.,
22906., 10705.
]
-self.assertTrue(np.array_equal(x[0:10], expect))
+np.testing.assert_allclose(x[0:10], expect)
expect = [
1958., 18414., 20090., 21910., 22746., 27346., 22347., 3002., 4564.,
26991.
]
-self.assertTrue(np.array_equal(x[10000:10010], expect))
+np.testing.assert_allclose(x[10000:10010], expect)
expect = [
25580., 12606., 553., 16387., 29536., 4241., 20946., 16899., 16339.,
4662.
]
-self.assertTrue(np.array_equal(x[20000:20010], expect))
+np.testing.assert_allclose(x[20000:20010], expect)
x = paddle.randperm(30000, dtype='float64').numpy()
expect = [
19051., 2449., 21940., 11121., 282., 7330., 13747., 24321., 21147.,
9163.
]
-self.assertTrue(np.array_equal(x[0:10], expect))
+np.testing.assert_allclose(x[0:10], expect)
expect = [
15483., 1315., 5723., 20954., 13251., 25539., 5074., 1823., 14945.,
17624.
]
-self.assertTrue(np.array_equal(x[10000:10010], expect))
+np.testing.assert_allclose(x[10000:10010], expect)
expect = [
10516., 2552., 29970., 5941., 986., 8007., 24805., 26753., 12202.,
21404.
]
-self.assertTrue(np.array_equal(x[20000:20010], expect))
+np.testing.assert_allclose(x[20000:20010], expect)
paddle.enable_static()
......
......@@ -163,8 +163,8 @@ class TestRelu6Net(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-6)
if __name__ == '__main__':
......
......@@ -165,8 +165,8 @@ class TestReluNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-6)
if __name__ == '__main__':
......
......@@ -212,7 +212,7 @@ class TestScatterAPI(unittest.TestCase):
mlu_value = mlu_exe.run(feed=feed, fetch_list=fetch)[0]
return mlu_value
-self.assertTrue(np.array_equal(test_dygraph(), test_static_graph()))
+np.testing.assert_allclose(test_dygraph(), test_static_graph())
class TestScatterOpFp16(OpTest):
......
......@@ -549,7 +549,7 @@ class TestSliceApiWithTensor(unittest.TestCase):
ends=paddle.to_tensor(ends, dtype='int32'))
a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)
-self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))
+np.testing.assert_allclose(a_1.numpy(), a_2.numpy())
def test_bool_tensor(self):
with paddle.fluid.dygraph.guard():
......@@ -565,7 +565,7 @@ class TestSliceApiWithTensor(unittest.TestCase):
y_np = tt[0:3, 1:5, 2:4]
self.assertTrue(paddle.bool == y_paddle.dtype)
-self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))
+np.testing.assert_array_equal(y_paddle.numpy(), y_np)
class TestImperativeVarBaseGetItem(unittest.TestCase):
......@@ -620,11 +620,11 @@ class TestInferShape(unittest.TestCase):
100,
], [0], [1])
np_slice = x_arr[:, :, 0:1]
-self.assertTrue(np.array_equal(pp_slice, np_slice))
+np.testing.assert_allclose(pp_slice, np_slice)
pp_slice = paddle.slice(x, (-100, ), [0], [1])
np_slice = x_arr[0:1]
-self.assertTrue(np.array_equal(pp_slice, np_slice))
+np.testing.assert_allclose(pp_slice, np_slice)
x_arr = np.array([], dtype=np.float32)
x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))
......
......@@ -157,8 +157,8 @@ class TestPowNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-5)
+np.testing.assert_allclose(mlu_loss, cpu_loss)
if __name__ == '__main__':
......
......@@ -108,8 +108,8 @@ class API_TestSplit(unittest.TestCase):
input1 = np.random.random([1, 10]).astype('float32')
r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
ex_x0, ex_x1 = np.split(input1, (3, ), axis=1)
-self.assertTrue(np.allclose(ex_x0, r0))
-self.assertTrue(np.allclose(ex_x1, r1))
+np.testing.assert_allclose(ex_x0, r0)
+np.testing.assert_allclose(ex_x1, r1)
class API_TestSplit2(unittest.TestCase):
......@@ -123,8 +123,8 @@ class API_TestSplit2(unittest.TestCase):
input1 = np.random.random([1, 10]).astype('float32')
r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
ex_x0, ex_x1 = np.split(input1, 2, axis=1)
-self.assertTrue(np.allclose(ex_x0, r0))
-self.assertTrue(np.allclose(ex_x1, r1))
+np.testing.assert_allclose(ex_x0, r0)
+np.testing.assert_allclose(ex_x1, r1)
class API_TestDygraphSplit(unittest.TestCase):
......@@ -139,9 +139,9 @@ class API_TestDygraphSplit(unittest.TestCase):
x1_out = x1.numpy()
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
-self.assertTrue(np.allclose(ex_x0, x0_out))
-self.assertTrue(np.allclose(ex_x1, x1_out))
-self.assertTrue(np.allclose(ex_x2, x2_out))
+np.testing.assert_allclose(ex_x0, x0_out)
+np.testing.assert_allclose(ex_x1, x1_out)
+np.testing.assert_allclose(ex_x2, x2_out)
def test_out2(self):
with fluid.dygraph.guard(paddle.MLUPlace(0)):
......@@ -153,9 +153,9 @@ class API_TestDygraphSplit(unittest.TestCase):
x1_out = x1.numpy()
x2_out = x2.numpy()
ex_x0, ex_x1, ex_x2 = np.split(input_1, (1, 3), axis=1)
-self.assertTrue(np.allclose(ex_x0, x0_out))
-self.assertTrue(np.allclose(ex_x1, x1_out))
-self.assertTrue(np.allclose(ex_x2, x2_out))
+np.testing.assert_allclose(ex_x0, x0_out)
+np.testing.assert_allclose(ex_x1, x1_out)
+np.testing.assert_allclose(ex_x2, x2_out)
# attr(axis) is Tensor
......
......@@ -59,7 +59,7 @@ class TestL2LossDeterministic(unittest.TestCase):
x = paddle.to_tensor(x_np)
y1 = _C_ops.squared_l2_norm(x)
y2 = _C_ops.squared_l2_norm(x)
-self.assertTrue(np.array_equal(y1.numpy(), y2.numpy()))
+np.testing.assert_allclose(y1.numpy(), y2.numpy())
def test_main(self):
self.check_place(paddle.CPUPlace())
......
......@@ -148,7 +148,7 @@ class API_test(unittest.TestCase):
},
fetch_list=[result_stack])
expected_result = np.stack([input1, input2, input3], axis=0)
-self.assertTrue(np.allclose(expected_result, result))
+np.testing.assert_allclose(expected_result, result)
def test_single_tensor_error(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
......@@ -169,14 +169,14 @@ class API_DygraphTest(unittest.TestCase):
result = paddle.stack([x1, x2, x3])
result_np = result.numpy()
expected_result = np.stack([data1, data2, data3])
-self.assertTrue(np.allclose(expected_result, result_np))
+np.testing.assert_allclose(expected_result, result_np)
with fluid.dygraph.guard(place=paddle.MLUPlace(0)):
y1 = fluid.dygraph.to_variable(data1)
result = paddle.stack([y1], axis=0)
result_np_2 = result.numpy()
expected_result_2 = np.stack([data1], axis=0)
-self.assertTrue(np.allclose(expected_result_2, result_np_2))
+np.testing.assert_allclose(expected_result_2, result_np_2)
def test_single_tensor_error(self):
with fluid.dygraph.guard(place=paddle.MLUPlace(0)):
......
......@@ -145,8 +145,8 @@ class TestTanhNet(unittest.TestCase):
cpu_pred, cpu_loss = self._test(False)
mlu_pred, mlu_loss = self._test(True)
-self.assertTrue(np.allclose(mlu_pred, cpu_pred))
-self.assertTrue(np.allclose(mlu_loss, cpu_loss))
+np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-6)
+np.testing.assert_allclose(mlu_loss, cpu_loss)
if __name__ == '__main__':
......
......@@ -189,43 +189,43 @@ class TestTopKAPI(unittest.TestCase):
# test case for basic test case 1
paddle_result = paddle.topk(input_tensor, k=2)
numpy_result = numpy_topk(self.input_data, k=2)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 2 with axis
paddle_result = paddle.topk(input_tensor, k=2, axis=1)
numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 3 with tensor K
k_tensor = paddle.to_tensor(np.array([2]))
paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1)
numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 4 with tensor largest
k_tensor = paddle.to_tensor(np.array([2]))
paddle_result = paddle.topk(input_tensor, k=2, axis=1, largest=False)
numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 5 with axis -1
k_tensor = paddle.to_tensor(np.array([2]))
paddle_result = paddle.topk(input_tensor, k=2, axis=-1, largest=False)
numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 6 for the partial sort
paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1)
numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-self.assertTrue(np.allclose(paddle_result[0].numpy(), numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1].numpy(), numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0].numpy(), numpy_result[0])
+np.testing.assert_allclose(paddle_result[1].numpy(), numpy_result[1])
# test case for basic test case 7 for the unsorted
paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False)
sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()),
axis=1,
k=2)
numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+np.testing.assert_allclose(sort_paddle[0], numpy_result[0])
def run_static(self, place):
paddle.enable_static()
......@@ -263,32 +263,32 @@ class TestTopKAPI(unittest.TestCase):
result7[0], result7[1]
])
numpy_result = numpy_topk(self.input_data, k=2)
-self.assertTrue(np.allclose(paddle_result[0], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[1], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[0], numpy_result[0])
+np.testing.assert_allclose(paddle_result[1], numpy_result[1])
numpy_result = numpy_topk(self.input_data, k=2, axis=-1)
-self.assertTrue(np.allclose(paddle_result[2], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[3], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[2], numpy_result[0])
+np.testing.assert_allclose(paddle_result[3], numpy_result[1])
numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-self.assertTrue(np.allclose(paddle_result[4], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[5], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[4], numpy_result[0])
+np.testing.assert_allclose(paddle_result[5], numpy_result[1])
numpy_result = numpy_topk(self.input_data,
k=2,
axis=1,
largest=False)
-self.assertTrue(np.allclose(paddle_result[6], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[7], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[6], numpy_result[0])
+np.testing.assert_allclose(paddle_result[7], numpy_result[1])
numpy_result = numpy_topk(self.input_data,
k=2,
axis=-1,
largest=False)
-self.assertTrue(np.allclose(paddle_result[8], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[9], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[8], numpy_result[0])
+np.testing.assert_allclose(paddle_result[9], numpy_result[1])
numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1)
-self.assertTrue(np.allclose(paddle_result[10], numpy_result[0]))
-self.assertTrue(np.allclose(paddle_result[11], numpy_result[1]))
+np.testing.assert_allclose(paddle_result[10], numpy_result[0])
+np.testing.assert_allclose(paddle_result[11], numpy_result[1])
sort_paddle = numpy_topk(paddle_result[12], axis=1, k=2)
numpy_result = numpy_topk(self.input_data, k=2, axis=1)
-self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0]))
+np.testing.assert_allclose(sort_paddle[0], numpy_result[0])
def test_cases(self):
places = [core.CPUPlace()]
......
......@@ -154,8 +154,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
feed={"x": data},
fetch_list=[tril_out, triu_out],
)
-self.assertTrue(np.allclose(tril_out, np.tril(data)))
-self.assertTrue(np.allclose(triu_out, np.triu(data)))
+np.testing.assert_allclose(tril_out, np.tril(data))
+np.testing.assert_allclose(triu_out, np.triu(data))
def test_api_with_dygraph(self):
paddle.disable_static()
......@@ -167,8 +167,8 @@ class TestTrilTriuOpAPI(unittest.TestCase):
x = fluid.dygraph.to_variable(data)
tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu(
x).numpy()
-self.assertTrue(np.allclose(tril_out, np.tril(data)))
-self.assertTrue(np.allclose(triu_out, np.triu(data)))
+np.testing.assert_allclose(tril_out, np.tril(data))
+np.testing.assert_allclose(triu_out, np.triu(data))
def test_fluid_api(self):
paddle.enable_static()
......
......@@ -71,8 +71,11 @@ class TestMLUUniformRandomOp(OpTest):
def verify_output(self, outs):
hist, prob = self.output_hist(np.array(outs[0]))
-self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                "hist: " + str(hist))
+np.testing.assert_allclose(hist,
+                           prob,
+                           rtol=0,
+                           atol=0.01,
+                           err_msg="hist: " + str(hist))
class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
......@@ -100,8 +103,11 @@ class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
-self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
-                "hist: " + str(hist))
+np.testing.assert_allclose(hist,
+                           prob,
+                           rtol=0,
+                           atol=0.01,
+                           err_msg="hist: " + str(hist))
if __name__ == "__main__":
......
......@@ -288,7 +288,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
result = paddle.where(cond, a, b)
result = result.numpy()
expect = np.where(cond, a, b)
-self.assertTrue(np.array_equal(expect, result))
+np.testing.assert_array_equal(expect, result)
def test_dygraph_api_broadcast_1(self):
cond_shape = [2, 4]
......@@ -351,7 +351,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
fetch_list=[z.name],
return_numpy=False)
expect_out = np.array([[0, 0], [1, 1]])
-self.assertTrue(np.allclose(expect_out, np.array(res)))
+np.testing.assert_allclose(expect_out, np.array(res))
data = np.array([True, True, False])
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[(-1)])
......@@ -364,7 +364,7 @@ class TestWhereDygraphAPI(unittest.TestCase):
fetch_list=[z.name],
return_numpy=False)
expect_out = np.array([[0], [1]])
-self.assertTrue(np.allclose(expect_out, np.array(res)))
+np.testing.assert_allclose(expect_out, np.array(res))
class TestWhereOpError(unittest.TestCase):
......
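One judgment call visible throughout the diff: some exact np.array_equal checks became np.testing.assert_array_equal (keeping exact semantics, as in the bool-tensor and where cases), while others became np.testing.assert_allclose (near-equality, rtol=1e-7 by default). For integer outputs such as randperm the two agree; for floats the practical difference is shown in this small NumPy-only sketch:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-9

np.testing.assert_allclose(a, b)  # passes: within the default rtol=1e-7
try:
    np.testing.assert_array_equal(a, b)  # exact equality is required
except AssertionError:
    print("exact check fails on a 1e-9 perturbation")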