Unverified commit 2de0d676, authored by Nyakku Shigure, committed by GitHub

[CodeStyle][NPU] use np.testing.assert_allclose instead of self.assertTrue(np.allclose(...)) (part 1) (#44988)

* autofix

* try resolve precision issues

* revert some changes

* clean some `err_msg`

* 0.0001 -> 1e-4

* update commented assert code

* try to fix some shape errors

* `numpy` -> `np`

* empty commit, trigger kunlun ci, test=kunlun

* empty commit, retrigger kunlun ci, test=kunlun

* empty commit, trigger kunlun ci, try fix npu memcpy_h2d, test=kunlun

* try fix npu import error, test=kunlun
Parent 2594935a
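
A note on the mechanical rewrite this commit performs: `np.allclose` defaults to `rtol=1e-05, atol=1e-08, equal_nan=False`, while `np.testing.assert_allclose` defaults to the stricter `rtol=1e-07, atol=0` (and `equal_nan=True`). The autofix therefore passes `rtol=1e-05` (plus any `atol` the original call had) explicitly, so the effective tolerance of each test is unchanged. A minimal before/after sketch:

import numpy as np

actual = np.array([1.0, 2.0 + 1e-6])
desired = np.array([1.0, 2.0])

# Old style: a bare boolean check; on failure unittest only prints
# "False is not true" plus whatever msg the test built by hand.
#     self.assertTrue(np.allclose(actual, desired))

# New style: on failure, raises AssertionError with the mismatch count,
# max absolute/relative error, and both arrays. rtol=1e-05 is explicit
# to preserve np.allclose's default tolerance.
np.testing.assert_allclose(actual, desired, rtol=1e-05, atol=1e-08)
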
......@@ -123,13 +123,13 @@ class TestWeightQuantization(unittest.TestCase):
res_fp16 = self.run_models(save_model_dir, model_filename,
params_filename, input_data, True)
self.assertTrue(
np.allclose(res_fp32,
res_fp16,
rtol=1e-5,
atol=1e-08,
equal_nan=True),
msg='Failed to test the accuracy of the fp32 and fp16 model.')
np.testing.assert_allclose(
res_fp32,
res_fp16,
rtol=1e-05,
atol=1e-08,
equal_nan=True,
err_msg='Failed to test the accuracy of the fp32 and fp16 model.')
try:
os.system("rm -rf {}".format(save_model_dir))
......
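
The hunk above also carries `equal_nan=True` over verbatim. For `np.allclose` the flag defaults to False and had to be passed; `assert_allclose` already defaults to `equal_nan=True`, but keeping it explicit makes the rewrite a faithful one-to-one translation. A quick illustration:

import numpy as np

a = np.array([np.nan, 1.0])
np.testing.assert_allclose(a, a, rtol=1e-05, equal_nan=True)  # passes
print(np.allclose(a, a))  # False: np.allclose defaults to equal_nan=False
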
......@@ -114,7 +114,7 @@ class TestCorrelationOp(unittest.TestCase):
},
fetch_list=[out.name, loss.name])
self.assertTrue(np.allclose(res[0], out_np))
np.testing.assert_allclose(res[0], out_np, rtol=1e-05, atol=1e-8)
class Net(fluid.dygraph.Layer):
......@@ -159,7 +159,7 @@ class TestCorrelationOpDyGraph(unittest.TestCase):
corr_pd = Net('corr_pd')
y = corr_pd(x1, x2)
out = y.numpy()
self.assertTrue(np.allclose(out, out_np))
np.testing.assert_allclose(out, out_np, rtol=1e-05, atol=1e-8)
if __name__ == '__main__':
......
......@@ -132,10 +132,12 @@ class TestModelCastBF16(unittest.TestCase):
amp_fun=_amp_fun,
startup_prog=startup_prog)
self.assertTrue(
np.allclose(cutf(static_ret_bf16), cutf(static_ret), 1e-2))
self.assertTrue(
np.allclose(cutf(static_ret_bf16), cutf(ret_fp32bf16), 1e-2))
np.testing.assert_allclose(cutf(static_ret_bf16),
cutf(static_ret),
rtol=0.01)
np.testing.assert_allclose(cutf(static_ret_bf16),
cutf(ret_fp32bf16),
rtol=0.01)
with self.static_graph():
t = layers.data(name='t', shape=[size, size], dtype='float32')
......
......@@ -228,18 +228,18 @@ class TestImageMultiPrecision(unittest.TestCase):
use_nesterov=use_nesterov,
optimizer=optimizer)
self.assertTrue(np.allclose(np.array(train_loss_fp16),
np.array(train_loss_fp32),
rtol=1e-02,
atol=1e-05,
equal_nan=True),
msg='Failed to train in pure FP16.')
self.assertTrue(np.allclose(np.array(test_loss_fp16),
np.array(test_loss_fp32),
rtol=1e-02,
atol=1e-05,
equal_nan=True),
msg='Failed to test in pure FP16.')
np.testing.assert_allclose(np.array(train_loss_fp16),
np.array(train_loss_fp32),
rtol=0.01,
atol=1e-05,
equal_nan=True,
err_msg='Failed to train in pure FP16.')
np.testing.assert_allclose(np.array(test_loss_fp16),
np.array(test_loss_fp32),
rtol=0.01,
atol=1e-05,
equal_nan=True,
err_msg='Failed to test in pure FP16.')
do_test(use_nesterov=False)
do_test(use_nesterov=True)
......
......@@ -185,9 +185,12 @@ class TestWeightDecay(unittest.TestCase):
param_sum2 = self.check_weight_decay2(place, model)
for i in range(len(param_sum1)):
self.assertTrue(
np.allclose(param_sum1[i], param_sum2[i]),
"Current place: {}, i: {}, sum1: {}, sum2: {}".format(
np.testing.assert_allclose(
param_sum1[i],
param_sum2[i],
rtol=1e-05,
err_msg='Current place: {}, i: {}, sum1: {}, sum2: {}'.
format(
place, i, param_sum1[i]
[~np.isclose(param_sum1[i], param_sum2[i])],
param_sum2[i]
......
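
Hunks like the `TestWeightDecay` one above keep a hand-built `err_msg` that prints only the offending elements, selected with a boolean mask from `np.isclose`. A minimal sketch of the pattern (the array names here are illustrative):

import numpy as np

sum1 = np.array([1.0, 2.0, 3.0])
sum2 = np.array([1.0, 2.5, 3.0])
mask = ~np.isclose(sum1, sum2)  # True where the arrays disagree
# Raises AssertionError; err_msg lists only the differing elements.
np.testing.assert_allclose(
    sum1, sum2, rtol=1e-05,
    err_msg='mismatched sum1: {}, sum2: {}'.format(sum1[mask], sum2[mask]))
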
......@@ -77,18 +77,24 @@ class TestCustomTanhDoubleGradJit(unittest.TestCase):
custom_ops.custom_tanh, device, dtype, x)
pd_out, pd_dx_grad, pd_dout = custom_tanh_double_grad_dynamic(
paddle.tanh, device, dtype, x)
self.assertTrue(
np.allclose(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
np.testing.assert_allclose(
out,
pd_out,
rtol=1e-05,
err_msg='custom op out: {},\n paddle api out: {}'.format(
out, pd_out))
self.assertTrue(
np.allclose(dx_grad, pd_dx_grad),
"custom op dx grad: {},\n paddle api dx grad: {}".format(
dx_grad, pd_dx_grad))
self.assertTrue(
np.allclose(dout, pd_dout),
"custom op out grad: {},\n paddle api out grad: {}".format(
dout, pd_dout))
np.testing.assert_allclose(
dx_grad,
pd_dx_grad,
rtol=1e-05,
err_msg='custom op dx grad: {},\n paddle api dx grad: {}'.
format(dx_grad, pd_dx_grad))
np.testing.assert_allclose(
dout,
pd_dout,
rtol=1e-05,
err_msg='custom op out grad: {},\n paddle api out grad: {}'.
format(dout, pd_dout))
def test_func_double_grad_dynamic(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
......
......@@ -201,9 +201,12 @@ class TestIfElse(unittest.TestCase):
fetch_list=[out])
o2 = self.numpy_cal()
self.assertTrue(
np.allclose(o1, o2, atol=1e-8),
"IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))
np.testing.assert_allclose(
o1,
o2,
rtol=1e-05,
atol=1e-08,
)
def test_cpu(self):
self.compare_ifelse_op_and_numpy(fluid.CPUPlace())
......
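
The `TestIfElse` hunk above is one of the "clean some `err_msg`" cases: the old message only echoed both arrays, which `assert_allclose` already does on failure, so the autofixed call drops it. What the default failure report contains:

import numpy as np

try:
    np.testing.assert_allclose([1.0, 2.0], [1.0, 2.1], rtol=1e-05)
except AssertionError as e:
    # The built-in report includes the mismatched-element count and the
    # maximum absolute and relative differences, plus both arrays.
    print(e)
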
......@@ -46,9 +46,10 @@ class BenchmarkSuite(OpTest):
actual_t = np.array(item_gpu_out)
var_name = variable if isinstance(
variable, six.string_types) else variable.name
self.assertTrue(
np.allclose(actual_t, expect_t, atol=atol), "Output (" +
var_name + ") has diff" + str(actual_t) + "\n" + str(expect_t))
np.testing.assert_allclose(actual_t,
expect_t,
rtol=1e-05,
atol=atol)
self.assertListEqual(actual.lod(), expect.lod(),
"Output (" + var_name + ") has different lod")
......
......@@ -105,12 +105,11 @@ class DistPassTestBase(unittest.TestCase):
if out_var_no_pass is None:
self.assertTrue(out_var_pass is None)
else:
self.assertTrue(
np.allclose(out_var_no_pass,
out_var_pass,
rtol=self.rtol,
atol=self.atol,
equal_nan=self.equal_nan))
np.testing.assert_allclose(out_var_no_pass,
out_var_pass,
rtol=self.rtol,
atol=self.atol,
equal_nan=self.equal_nan)
@classmethod
def _to_var_names(cls, names_or_vars):
......
......@@ -104,9 +104,7 @@ class TestDygraphBasicApi_ToVariable(unittest.TestCase):
self.dygraph_func = func
dygraph_res = self.get_dygraph_output()
static_res = self.get_static_output()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph is {}\n static_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
# 1. test Apis that inherit from layers.Layer
......@@ -252,9 +250,7 @@ class TestDygraphBasicApi(unittest.TestCase):
def test_transformed_static_result(self):
dygraph_res = self.get_dygraph_output()
static_res = self.get_static_output()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestDygraphBasicApi_BilinearTensorProduct(TestDygraphBasicApi):
......@@ -419,9 +415,7 @@ class TestDygraphBasicApi_CosineDecay(unittest.TestCase):
def test_transformed_static_result(self):
dygraph_res = self.get_dygraph_output()
static_res = self.get_static_output()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestDygraphBasicApi_ExponentialDecay(TestDygraphBasicApi_CosineDecay):
......
......@@ -181,12 +181,8 @@ class TestBert(unittest.TestCase):
self.data_reader)
dygraph_loss, dygraph_ppl = self.train_dygraph(self.bert_config,
self.data_reader)
self.assertTrue(np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
self.assertTrue(np.allclose(static_ppl, dygraph_ppl),
msg="static_ppl: {} \n dygraph_ppl: {}".format(
static_ppl, dygraph_ppl))
np.testing.assert_allclose(static_loss, dygraph_loss, rtol=1e-05)
np.testing.assert_allclose(static_ppl, dygraph_ppl, rtol=1e-05)
self.verify_predict()
......@@ -200,19 +196,25 @@ class TestBert(unittest.TestCase):
for dy_res, st_res, dy_jit_res, predictor_res in zip(
dygraph_pred_res, static_pred_res, dygraph_jit_pred_res,
predictor_pred_res):
self.assertTrue(
np.allclose(st_res, dy_res),
"dygraph_res: {},\n static_res: {}".format(
np.testing.assert_allclose(
st_res,
dy_res,
rtol=1e-05,
err_msg='dygraph_res: {},\n static_res: {}'.format(
dy_res[~np.isclose(st_res, dy_res)],
st_res[~np.isclose(st_res, dy_res)]))
self.assertTrue(
np.allclose(st_res, dy_jit_res),
"dygraph_jit_res: {},\n static_res: {}".format(
np.testing.assert_allclose(
st_res,
dy_jit_res,
rtol=1e-05,
err_msg='dygraph_jit_res: {},\n static_res: {}'.format(
dy_jit_res[~np.isclose(st_res, dy_jit_res)],
st_res[~np.isclose(st_res, dy_jit_res)]))
self.assertTrue(
np.allclose(st_res, predictor_res),
"dygraph_jit_res: {},\n static_res: {}".format(
np.testing.assert_allclose(
st_res,
predictor_res,
rtol=1e-05,
err_msg='dygraph_jit_res: {},\n static_res: {}'.format(
predictor_res[~np.isclose(st_res, predictor_res)],
st_res[~np.isclose(st_res, predictor_res)]))
break
......
......@@ -705,11 +705,14 @@ class TestTrain(unittest.TestCase):
static_res = self.train_bmn(self.args, self.place, to_static=True)
dygraph_res = self.train_bmn(self.args, self.place, to_static=False)
self.assertTrue(
np.allclose(dygraph_res, static_res),
"dygraph_res: {},\n static_res: {}".format(
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph_res: {},\n static_res: {}'.format(
dygraph_res[~np.isclose(dygraph_res, static_res)],
static_res[~np.isclose(dygraph_res, static_res)]))
static_res[~np.isclose(dygraph_res, static_res)]),
atol=1e-8)
# Prediction needs trained models, so put `test_predict` at last of `test_train`
self.verify_predict()
......@@ -728,21 +731,30 @@ class TestTrain(unittest.TestCase):
for dy_res, st_res, dy_jit_res, predictor_res in zip(
dygraph_pred_res, static_pred_res, dygraph_jit_pred_res,
predictor_pred_res):
self.assertTrue(
np.allclose(st_res, dy_res),
"dygraph_res: {},\n static_res: {}".format(
np.testing.assert_allclose(
st_res,
dy_res,
rtol=1e-05,
err_msg='dygraph_res: {},\n static_res: {}'.format(
dy_res[~np.isclose(st_res, dy_res)],
st_res[~np.isclose(st_res, dy_res)]))
self.assertTrue(
np.allclose(st_res, dy_jit_res),
"dygraph_jit_res: {},\n static_res: {}".format(
st_res[~np.isclose(st_res, dy_res)]),
atol=1e-8)
np.testing.assert_allclose(
st_res,
dy_jit_res,
rtol=1e-05,
err_msg='dygraph_jit_res: {},\n static_res: {}'.format(
dy_jit_res[~np.isclose(st_res, dy_jit_res)],
st_res[~np.isclose(st_res, dy_jit_res)]))
self.assertTrue(
np.allclose(st_res, predictor_res),
"dygraph_jit_res: {},\n static_res: {}".format(
st_res[~np.isclose(st_res, dy_jit_res)]),
atol=1e-8)
np.testing.assert_allclose(
st_res,
predictor_res,
rtol=1e-05,
err_msg='dygraph_jit_res: {},\n static_res: {}'.format(
predictor_res[~np.isclose(st_res, predictor_res)],
st_res[~np.isclose(st_res, predictor_res)]))
st_res[~np.isclose(st_res, predictor_res)]),
atol=1e-8)
break
def predict_dygraph(self, data):
......
......@@ -230,9 +230,12 @@ class TestContinueInFor(unittest.TestCase):
def test_transformed_static_result(self):
static_res = self.run_static_mode()
dygraph_res = self.run_dygraph_mode()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
class TestContinueInForAtEnd(TestContinueInFor):
......
......@@ -54,9 +54,11 @@ class TestCacheProgram(unittest.TestCase):
prev_out, (tuple, list)) else prev_out.numpy()
cur_out_numpy = cur_out[0].numpy() if isinstance(
cur_out, (tuple, list)) else cur_out.numpy()
self.assertTrue(
np.allclose(prev_out_numpy, cur_out_numpy),
msg=
np.testing.assert_allclose(
prev_out_numpy,
cur_out_numpy,
rtol=1e-05,
err_msg=
'Output in previous batch is {}\n Output in current batch is \n{}'
.format(prev_out_numpy, cur_out_numpy))
self.assertEqual(prev_ops, cur_ops)
......@@ -106,9 +108,12 @@ class TestCacheProgramWithOptimizer(unittest.TestCase):
def test_with_optimizer(self):
dygraph_loss = self.train_dygraph()
static_loss = self.train_static()
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(
dygraph_loss,
static_loss,
rtol=1e-05,
err_msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
def simple_func(x):
......
......@@ -91,9 +91,11 @@ class TestCastBase(unittest.TestCase):
msg='The target dtype is {}, but the casted dtype is {}.'.format(
self.cast_dtype, res.dtype))
ref_val = self.input.astype(self.cast_dtype)
self.assertTrue(
np.allclose(res, ref_val),
msg='The casted value is {}.\nThe correct value is {}.'.format(
np.testing.assert_allclose(
res,
ref_val,
rtol=1e-05,
err_msg='The casted value is {}.\nThe correct value is {}.'.format(
res, ref_val))
......@@ -149,9 +151,11 @@ class TestMixCast(TestCastBase):
self.cast_dtype, res.dtype))
ref_val = self.input.astype(self.cast_int).astype(
self.cast_float).astype(self.cast_bool).astype(self.cast_dtype)
self.assertTrue(
np.allclose(res, ref_val),
msg='The casted value is {}.\nThe correct value is {}.'.format(
np.testing.assert_allclose(
res,
ref_val,
rtol=1e-05,
err_msg='The casted value is {}.\nThe correct value is {}.'.format(
res, ref_val))
......
......@@ -96,9 +96,11 @@ class TestSequential(unittest.TestCase):
out = self.net(x)
if to_static:
load_out = self._test_load(self.net, x)
self.assertTrue(np.allclose(load_out, out),
msg='load_out is {}\st_out is {}'.format(
load_out, out))
np.testing.assert_allclose(
load_out,
out,
rtol=1e-05,
err_msg='load_out is {}\nst_out is {}'.format(load_out, out))
return out
......@@ -106,9 +108,12 @@ class TestSequential(unittest.TestCase):
paddle.jit.set_code_level(100)
dy_out = self._run(to_static=False)
st_out = self._run(to_static=True)
self.assertTrue(np.allclose(dy_out, st_out),
msg='dygraph_res is {}\nstatic_res is {}'.format(
dy_out, st_out))
np.testing.assert_allclose(
dy_out,
st_out,
rtol=1e-05,
err_msg='dygraph_res is {}\nstatic_res is {}'.format(
dy_out, st_out))
def _test_load(self, net, x):
paddle.jit.save(net, self.model_path)
......
......@@ -91,9 +91,12 @@ class TestRecursiveCall1(unittest.TestCase):
def test_transformed_static_result(self):
static_res = self.get_static_output()
dygraph_res = self.get_dygraph_output()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
lambda_fun = lambda x: x
......@@ -176,9 +179,7 @@ class TestRecursiveCall2(unittest.TestCase):
def test_transformed_static_result(self):
dygraph_res = self.get_dygraph_output()
static_res = self.get_static_output()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestThirdPartyLibrary(TestRecursiveCall2):
......
......@@ -46,9 +46,9 @@ class TestToTensor(unittest.TestCase):
x = paddle.to_tensor([3])
print(paddle.jit.to_static(func).code)
self.assertTrue(
np.allclose(
paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4])))
np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(),
np.array([1, 2, 3, 4]),
rtol=1e-05)
class TestToTensor1(unittest.TestCase):
......@@ -66,9 +66,9 @@ class TestToTensor1(unittest.TestCase):
x = paddle.to_tensor([3])
print(paddle.jit.to_static(func).code)
self.assertTrue(
np.allclose(
paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4])))
np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(),
np.array([1, 2, 3, 4]),
rtol=1e-05)
class TestToTensor2(unittest.TestCase):
......@@ -81,10 +81,9 @@ class TestToTensor2(unittest.TestCase):
x = paddle.to_tensor([3])
print(paddle.jit.to_static(func).code)
self.assertTrue(
np.allclose(
paddle.jit.to_static(func)(x).numpy(),
np.array([[1], [2], [3], [4]])))
np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(),
np.array([[1], [2], [3], [4]]),
rtol=1e-05)
if __name__ == '__main__':
......
......@@ -127,7 +127,7 @@ class TestInputSpec(unittest.TestCase):
jit.save(net, self.model_path)
infer_net = fluid.dygraph.jit.load(self.model_path)
pred = infer_net(x)
self.assertTrue(np.allclose(out.numpy(), pred.numpy()))
np.testing.assert_allclose(out.numpy(), pred.numpy(), rtol=1e-05)
# 3. we can decorate any method
x_2 = to_variable(np.ones([4, 20]).astype('float32'))
......@@ -218,25 +218,33 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase):
# [16, 10] + [10] (varbase)
out_1 = foo(to_variable(x_data), to_variable(y_data))
self.assertTrue(np.allclose(x_data + y_data, out_1.numpy()))
np.testing.assert_allclose(x_data + y_data,
out_1.numpy(),
rtol=1e-05)
self.assertTrue(len(foo.program_cache) == 1)
self.assertTrue(len(foo.program_cache.concrete_programs()) == 1)
first_program = foo.program_cache.last()
# [16, 10] + [10] (numpy)
out_2 = foo(to_variable(x_data), y_data)
self.assertTrue(np.allclose(x_data + y_data, out_2.numpy()))
np.testing.assert_allclose(x_data + y_data,
out_2.numpy(),
rtol=1e-05)
self.assertTrue(len(foo.program_cache) == 1)
# [16, 10] + [10] (numpy)
out_3 = foo(to_variable(x_data), z_data)
self.assertTrue(np.allclose(x_data + z_data, out_3.numpy()))
np.testing.assert_allclose(x_data + z_data,
out_3.numpy(),
rtol=1e-05)
# hit cache program
self.assertTrue(len(foo.program_cache) == 1)
# [16, 10] + [10] (numpy) with other different arguments (c=3)
out_4 = foo(to_variable(x_data), z_data, 3)
self.assertTrue(np.allclose(x_data + z_data, out_4.numpy()))
np.testing.assert_allclose(x_data + z_data,
out_4.numpy(),
rtol=1e-05)
# create a new program
self.assertTrue(len(foo.program_cache) == 2)
......
......@@ -195,9 +195,12 @@ class TestDictPop(unittest.TestCase):
def test_transformed_result(self):
dygraph_res = self._run_dygraph()
static_res = self._run_static()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_res, static_res))
class TestDictPop2(TestDictPop):
......
......@@ -48,9 +48,9 @@ class TestTrainEval(unittest.TestCase):
eval_out = x.numpy()
train_out = x.numpy() * 2
self.model.train()
self.assertTrue(np.allclose(self.model(x).numpy(), train_out))
np.testing.assert_allclose(self.model(x).numpy(), train_out, rtol=1e-05)
self.model.eval()
self.assertTrue(np.allclose(self.model(x).numpy(), eval_out))
np.testing.assert_allclose(self.model(x).numpy(), eval_out, rtol=1e-05)
if __name__ == "__main__":
......
......@@ -92,9 +92,12 @@ class TestPool2D(unittest.TestCase):
dygraph_res = self.train_dygraph()
static_res = self.train_static()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph_res is {}\n static_res is \n{}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph_res is {}\n static_res is \n{}'.format(
dygraph_res, static_res))
class TestLinear(TestPool2D):
......
......@@ -385,7 +385,7 @@ class TestTransform(TestTransformBase):
st_outs = (st_outs, )
for x, y in zip(dy_outs, st_outs):
self.assertTrue(np.allclose(x.numpy(), y.numpy()))
np.testing.assert_allclose(x.numpy(), y.numpy(), rtol=1e-05)
class TestTransformForOriginalList(TestTransform):
......
......@@ -65,11 +65,15 @@ class TestFullNameDecorator(unittest.TestCase):
x = np.ones([1, 2]).astype("float32")
answer = np.zeros([1, 2]).astype("float32")
with fluid.dygraph.guard():
self.assertTrue(
np.allclose(dygraph_decorated_func(x).numpy(), answer))
self.assertTrue(np.allclose(jit_decorated_func(x).numpy(), answer))
self.assertTrue(
np.allclose(decorated_call_decorated(x).numpy(), answer))
np.testing.assert_allclose(dygraph_decorated_func(x).numpy(),
answer,
rtol=1e-05)
np.testing.assert_allclose(jit_decorated_func(x).numpy(),
answer,
rtol=1e-05)
np.testing.assert_allclose(decorated_call_decorated(x).numpy(),
answer,
rtol=1e-05)
with self.assertRaises(NotImplementedError):
DoubleDecorated().double_decorated_func1(x)
with self.assertRaises(NotImplementedError):
......
......@@ -88,7 +88,7 @@ class TestGrad(unittest.TestCase):
def test_forward(self):
dygraph_res = self._run(self.func, to_static=False)
static_res = self._run(self.func, to_static=True)
self.assertTrue(np.allclose(static_res, dygraph_res))
np.testing.assert_allclose(static_res, dygraph_res, rtol=1e-05)
class TestGradLinear(TestGrad):
......@@ -116,7 +116,7 @@ class TestGradLinear(TestGrad):
origin_res = self.func(self.x).numpy()
load_res = load_func(self.x).numpy()
self.assertTrue(np.allclose(origin_res, load_res))
np.testing.assert_allclose(origin_res, load_res, rtol=1e-05)
def test_save_train_program(self):
grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0)
......@@ -136,7 +136,7 @@ class TestGradLinear(TestGrad):
origin_res = self.func(self.x).numpy()
load_res = load_func(self.x).numpy()
self.assertTrue(np.allclose(origin_res, load_res))
np.testing.assert_allclose(origin_res, load_res, rtol=1e-05)
class TestNoGradLinear(TestGradLinear):
......
......@@ -50,10 +50,9 @@ class TestGradientAggregationInDy2Static(unittest.TestCase):
return net.linear1.weight.grad
inp = paddle.to_tensor(np.random.randn(10, )).astype("float32")
self.assertTrue(
np.allclose(
simplenet_grad(inp, True).numpy(),
simplenet_grad(inp, False).numpy()))
np.testing.assert_allclose(simplenet_grad(inp, True).numpy(),
simplenet_grad(inp, False).numpy(),
rtol=1e-05)
if __name__ == '__main__':
......
......@@ -508,7 +508,9 @@ class TestDy2StIfElseBackward(unittest.TestCase):
net.train()
out = net(a, b, c)
out.backward()
self.assertTrue(np.allclose((b + net.param).numpy(), out.numpy()))
np.testing.assert_allclose((b + net.param).numpy(),
out.numpy(),
rtol=1e-05)
if __name__ == '__main__':
......
......@@ -108,8 +108,11 @@ class TestIsinstance(unittest.TestCase):
def _test_model(self, model):
st_out = train(model, to_static=True)
dy_out = train(model, to_static=False)
self.assertTrue(np.allclose(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
np.testing.assert_allclose(dy_out,
st_out,
rtol=1e-05,
err_msg='dy_out:\n {}\n st_out:\n{}'.format(
dy_out, st_out))
if __name__ == "__main__":
......
......@@ -549,9 +549,12 @@ class TestLACModel(unittest.TestCase):
def test_train(self):
st_out = self.train(self.args, to_static=True)
dy_out = self.train(self.args, to_static=False)
self.assertTrue(np.allclose(dy_out, st_out),
msg="dygraph output:\n{},\nstatic output:\n {}.".format(
dy_out, st_out))
np.testing.assert_allclose(
dy_out,
st_out,
rtol=1e-05,
err_msg='dygraph output:\n{},\nstatic output:\n {}.'.format(
dy_out, st_out))
# Prediction needs trained models, so put `test_predict` at last of `test_train`
# self.verify_predict()
......@@ -564,12 +567,8 @@ class TestLACModel(unittest.TestCase):
dy_pre = self.predict_dygraph(batch)
st_pre = self.predict_static(batch)
dy_jit_pre = self.predict_dygraph_jit(batch)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(dy_pre, st_pre, rtol=1e-05)
np.testing.assert_allclose(dy_jit_pre, st_pre, rtol=1e-05)
def predict_dygraph(self, batch):
words, targets, length = batch
......
......@@ -85,12 +85,17 @@ class TestNestLayerHook(unittest.TestCase):
st_out = self.train_net(to_static=True)
load_out = self.load_train()
print(st_out, dy_out, load_out)
self.assertTrue(np.allclose(st_out, dy_out),
msg='dygraph_res is {}\nstatic_res is {}'.format(
dy_out, st_out))
self.assertTrue(np.allclose(st_out, load_out),
msg='load_out is {}\nstatic_res is {}'.format(
load_out, st_out))
np.testing.assert_allclose(
st_out,
dy_out,
rtol=1e-05,
err_msg='dygraph_res is {}\nstatic_res is {}'.format(
dy_out, st_out))
np.testing.assert_allclose(
st_out,
load_out,
rtol=1e-05,
err_msg='load_out is {}\nstatic_res is {}'.format(load_out, st_out))
if __name__ == "__main__":
......
......@@ -68,7 +68,7 @@ class TestLen(unittest.TestCase):
def test_len(self):
dygraph_res = self._run(to_static=False)
static_res = self._run(to_static=True)
self.assertTrue(np.allclose(dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestLenWithTensorArray(TestLen):
......
......@@ -249,9 +249,11 @@ class TestListWithoutControlFlow(unittest.TestCase):
self.assertEqual(len(static_res_list), len(dygraph_res_list))
for stat_res, dy_res in zip(static_res_list, dygraph_res_list):
self.assertTrue(
np.allclose(stat_res, dy_res),
msg='dygraph_res is {}\nstatic_res is {}'.format(
np.testing.assert_allclose(
stat_res,
dy_res,
rtol=1e-05,
err_msg='dygraph_res is {}\nstatic_res is {}'.format(
stat_res, dy_res))
......
......@@ -204,9 +204,12 @@ class TestLogicalNot(TestLogicalBase):
def test_transformed_result(self):
dygraph_res = self._run_dygraph()
static_res = self._run_static()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph result is {}\nstatic_result is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph result is {}\nstatic_result is {}'.format(
dygraph_res, static_res))
class TestLogicalNot2(TestLogicalBase):
......@@ -217,9 +220,12 @@ class TestLogicalNot2(TestLogicalBase):
def test_transformed_result(self):
dygraph_res = self._run_dygraph()
static_res = self._run_static()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph result is {}\nstatic_result is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(
dygraph_res,
static_res,
rtol=1e-05,
err_msg='dygraph result is {}\nstatic_result is {}'.format(
dygraph_res, static_res))
class TestLogicalAnd(TestLogicalNot):
......
......@@ -327,7 +327,7 @@ class TestTransformWhileLoop(unittest.TestCase):
static_numpy = self._run_static()
dygraph_numpy = self._run_dygraph()
print(static_numpy, dygraph_numpy)
self.assertTrue(np.allclose(dygraph_numpy, static_numpy))
np.testing.assert_allclose(dygraph_numpy, static_numpy, rtol=1e-05)
class TestTransformWhileLoopWithoutTensor(TestTransformWhileLoop):
......@@ -404,7 +404,9 @@ class TestTransformForLoop(unittest.TestCase):
return ret.numpy()
def test_ast_to_func(self):
self.assertTrue(np.allclose(self._run_dygraph(), self._run_static()))
np.testing.assert_allclose(self._run_dygraph(),
self._run_static(),
rtol=1e-05)
class TestTransformForLoop2(TestTransformForLoop):
......
......@@ -69,9 +69,7 @@ class TestLstm(unittest.TestCase):
def test_lstm_to_static(self):
dygraph_out = self.run_lstm(to_static=False)
static_out = self.run_lstm(to_static=True)
self.assertTrue(np.allclose(dygraph_out, static_out),
msg='dygraph_out is {}\n static_out is \n{}'.format(
dygraph_out, static_out))
np.testing.assert_allclose(dygraph_out, static_out, rtol=1e-05)
def test_save_in_eval(self, with_training=True):
paddle.jit.ProgramTranslator().enable(True)
......@@ -98,15 +96,21 @@ class TestLstm(unittest.TestCase):
load_net = paddle.jit.load(model_path)
static_out = load_net(x)
self.assertTrue(np.allclose(dygraph_out.numpy(), static_out.numpy()),
msg='dygraph_out is {}\n static_out is \n{}'.format(
dygraph_out, static_out))
np.testing.assert_allclose(
dygraph_out.numpy(),
static_out.numpy(),
rtol=1e-05,
err_msg='dygraph_out is {}\n static_out is \n{}'.format(
dygraph_out, static_out))
# switch back into train mode.
net.train()
train_out = net(x)
self.assertTrue(np.allclose(dygraph_out.numpy(), train_out.numpy()),
msg='dygraph_out is {}\n static_out is \n{}'.format(
dygraph_out, train_out))
np.testing.assert_allclose(
dygraph_out.numpy(),
train_out.numpy(),
rtol=1e-05,
err_msg='dygraph_out is {}\n static_out is \n{}'.format(
dygraph_out, train_out))
def test_save_without_training(self):
self.test_save_in_eval(with_training=False)
......@@ -160,9 +164,12 @@ class TestSaveInEvalMode(unittest.TestCase):
eval_out = net(x)
infer_out = load_net(x)
self.assertTrue(np.allclose(eval_out.numpy(), infer_out.numpy()),
msg='eval_out is {}\n infer_out is \n{}'.format(
eval_out, infer_out))
np.testing.assert_allclose(
eval_out.numpy(),
infer_out.numpy(),
rtol=1e-05,
err_msg='eval_out is {}\n infer_out is \n{}'.format(
eval_out, infer_out))
class TestEvalAfterSave(unittest.TestCase):
......@@ -190,11 +197,11 @@ class TestEvalAfterSave(unittest.TestCase):
paddle.jit.save(net, model_path, input_spec=[x])
load_net = paddle.jit.load(model_path)
load_out = load_net(x)
self.assertTrue(np.allclose(dy_out.numpy(), load_out.numpy()))
np.testing.assert_allclose(dy_out.numpy(), load_out.numpy(), rtol=1e-05)
# eval
net.eval()
out = net(x)
self.assertTrue(np.allclose(dy_out.numpy(), out.numpy()))
np.testing.assert_allclose(dy_out.numpy(), out.numpy(), rtol=1e-05)
if __name__ == "__main__":
......
......@@ -38,9 +38,13 @@ class TestAMP(TestMNIST):
# NOTE(Aurelius84): In static AMP training there is a grep_list, but
# dygraph AMP has none, so the number of cast ops differs and the loss
# shows a small diff.
self.assertTrue(np.allclose(dygraph_loss, static_loss, atol=1e-3),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(
dygraph_loss,
static_loss,
rtol=1e-05,
atol=0.001,
err_msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
def train(self, to_static=False):
paddle.seed(SEED)
......
......@@ -38,9 +38,13 @@ class TestPureFP16(TestMNIST):
dygraph_loss = self.train_dygraph()
static_loss = self.train_static()
# NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here.
self.assertTrue(np.allclose(dygraph_loss, static_loss, atol=1e-3),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(
dygraph_loss,
static_loss,
rtol=1e-05,
atol=0.001,
err_msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
def train(self, to_static=False):
np.random.seed(SEED)
......
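
Both MNIST hunks above loosen `atol` to 0.001 because fp16 losses are noisy. As a reminder of how the two tolerances combine: like `np.isclose`, `assert_allclose` accepts a pair of values when `|actual - desired| <= atol + rtol * |desired|`, so `atol` dominates for values near zero:

import numpy as np

# |1.2004 - 1.2| = 0.0004 <= atol + rtol * |1.2| = 0.001 + 1.2e-05
np.testing.assert_allclose(1.2004, 1.2, rtol=1e-05, atol=0.001)
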
......@@ -80,16 +80,12 @@ class TestParameterList(unittest.TestCase):
def test_parameter_list(self):
static_loss = self.train(False, to_static=True)
dygraph_loss = self.train(False, to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)
def test_parameter_list_iter(self):
static_loss = self.train(True, to_static=True)
dygraph_loss = self.train(True, to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)
class NetWithRawParamList(paddle.nn.Layer):
......@@ -142,9 +138,7 @@ class TestRawParameterList(unittest.TestCase):
def test_parameter_list(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph result is {}\nstatic result is {}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)
class NetWithSubLayerParamList(paddle.nn.Layer):
......
......@@ -89,7 +89,7 @@ class TestWithNestedInput(unittest.TestCase):
def test_nest(self):
dygraph_res = self._run(to_static=False)
static_res = self._run(to_static=True)
self.assertTrue(np.allclose(dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestWithNestedOutput(unittest.TestCase):
......@@ -123,7 +123,9 @@ class TestWithNestedOutput(unittest.TestCase):
for dy_var, st_var in zip(dygraph_res, static_res):
if isinstance(dy_var,
(fluid.core.VarBase, fluid.core.eager.Tensor)):
self.assertTrue(np.allclose(dy_var.numpy(), st_var.numpy()))
np.testing.assert_allclose(dy_var.numpy(),
st_var.numpy(),
rtol=1e-05)
else:
self.assertTrue(dy_var, st_var)
......
......@@ -234,10 +234,10 @@ class TestEnableDeclarative(unittest.TestCase):
with fluid.dygraph.guard():
dygraph_output = self.program_translator.get_output(
simple_func, self.x, self.weight)
self.assertTrue(
np.allclose(static_output.numpy(),
dygraph_output.numpy(),
atol=1e-4))
np.testing.assert_allclose(static_output.numpy(),
dygraph_output.numpy(),
rtol=1e-05,
atol=1e-4)
def test_enable_disable_get_func(self):
......@@ -290,10 +290,10 @@ class TestEnableDeclarative(unittest.TestCase):
self.program_translator.enable(False)
with fluid.dygraph.guard():
dygraph_output = decorated_simple_func(self.x, self.weight)
self.assertTrue(
np.allclose(static_output.numpy(),
dygraph_output.numpy(),
atol=1e-4))
np.testing.assert_allclose(static_output.numpy(),
dygraph_output.numpy(),
rtol=1e-05,
atol=1e-4)
class Net(fluid.dygraph.layers.Layer):
......@@ -381,13 +381,13 @@ class TestIfElseEarlyReturn(unittest.TestCase):
answer = np.zeros([2, 2]) + 1
static_func = paddle.jit.to_static(dyfunc_with_if_else_early_return1)
out = static_func()
self.assertTrue(np.allclose(answer, out[0].numpy()))
np.testing.assert_allclose(answer, out[0].numpy(), rtol=1e-05)
def test_ifelse_early_return2(self):
answer = np.zeros([2, 2]) + 3
static_func = paddle.jit.to_static(dyfunc_with_if_else_early_return2)
out = static_func()
self.assertTrue(np.allclose(answer, out[0].numpy()))
np.testing.assert_allclose(answer, out[0].numpy(), rtol=1e-05)
class TestRemoveCommentInDy2St(unittest.TestCase):
......
......@@ -305,15 +305,9 @@ class TestPtb(unittest.TestCase):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(
loss_1, loss_2))
self.assertTrue(np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(
hidden_1, hidden_2))
self.assertTrue(np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(
cell_1, cell_2))
np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05)
np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05)
np.testing.assert_allclose(cell_1, cell_2, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -308,15 +308,9 @@ class TestPtb(unittest.TestCase):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(
loss_1, loss_2))
self.assertTrue(np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(
hidden_1, hidden_2))
self.assertTrue(np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(
cell_1, cell_2))
np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05)
np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05)
np.testing.assert_allclose(cell_1, cell_2, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -209,9 +209,7 @@ class TestDeclarative(unittest.TestCase):
def test_train(self):
st_out = train(self.args, self.place, to_static=True)
dy_out = train(self.args, self.place, to_static=False)
self.assertTrue(np.allclose(st_out, dy_out),
msg="dy_out:\n {}\n st_out:\n{}\n".format(
dy_out, st_out))
np.testing.assert_allclose(st_out, dy_out, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -253,15 +253,12 @@ class TestReturnBase(unittest.TestCase):
self.assertTrue(isinstance(static_res, tuple))
self.assertEqual(len(dygraph_res), len(static_res))
for i in range(len(dygraph_res)):
self.assertTrue(
np.allclose(dygraph_res[i], static_res[i]),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res[i], static_res[i]))
np.testing.assert_allclose(dygraph_res[i],
static_res[i],
rtol=1e-05)
elif isinstance(dygraph_res, np.ndarray):
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
else:
self.assertEqual(dygraph_res, static_res)
......
......@@ -115,7 +115,7 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
# Check the correctness of the inference
infer_out = self.load_and_run_inference(infer_model_dir, model_filename,
params_filename, inputs)
self.assertTrue(np.allclose(gt_out, infer_out))
np.testing.assert_allclose(gt_out, infer_out, rtol=1e-05)
def load_and_run_inference(self, model_path, model_filename,
params_filename, inputs):
......
......@@ -83,8 +83,12 @@ class TestDyToStaticSaveLoad(unittest.TestCase):
program_translator.enable(False)
dygraph_out, dygraph_loss = dygraph_net(x)
self.assertTrue(np.allclose(dygraph_out.numpy(), static_out.numpy()))
self.assertTrue(np.allclose(dygraph_loss.numpy(), static_loss.numpy()))
np.testing.assert_allclose(dygraph_out.numpy(),
static_out.numpy(),
rtol=1e-05)
np.testing.assert_allclose(dygraph_loss.numpy(),
static_loss.numpy(),
rtol=1e-05)
if __name__ == '__main__':
......
......@@ -473,12 +473,17 @@ class TestSeResnet(unittest.TestCase):
st_pre = self.predict_static(image)
dy_jit_pre = self.predict_dygraph_jit(image)
predictor_pre = self.predict_analysis_inference(image)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(
dy_pre,
st_pre,
rtol=1e-05,
err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
np.testing.assert_allclose(
dy_jit_pre,
st_pre,
rtol=1e-05,
err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
dy_jit_pre, st_pre))
flat_st_pre = st_pre.flatten()
flat_predictor_pre = np.array(predictor_pre).flatten()
......@@ -497,18 +502,26 @@ class TestSeResnet(unittest.TestCase):
pred_2, loss_2, acc1_2, acc5_2 = self.train(self.train_reader,
to_static=True)
self.assertTrue(np.allclose(pred_1, pred_2),
msg="static pred: {} \ndygraph pred: {}".format(
pred_1, pred_2))
self.assertTrue(np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(
loss_1, loss_2))
self.assertTrue(np.allclose(acc1_1, acc1_2),
msg="static acc1: {} \ndygraph acc1: {}".format(
acc1_1, acc1_2))
self.assertTrue(np.allclose(acc5_1, acc5_2),
msg="static acc5: {} \ndygraph acc5: {}".format(
acc5_1, acc5_2))
np.testing.assert_allclose(
pred_1,
pred_2,
rtol=1e-05,
err_msg='static pred: {} \ndygraph pred: {}'.format(pred_1, pred_2))
np.testing.assert_allclose(
loss_1,
loss_2,
rtol=1e-05,
err_msg='static loss: {} \ndygraph loss: {}'.format(loss_1, loss_2))
np.testing.assert_allclose(
acc1_1,
acc1_2,
rtol=1e-05,
err_msg='static acc1: {} \ndygraph acc1: {}'.format(acc1_1, acc1_2))
np.testing.assert_allclose(
acc5_1,
acc5_2,
rtol=1e-05,
err_msg='static acc5: {} \ndygraph acc5: {}'.format(acc5_1, acc5_2))
self.verify_predict()
......
......@@ -353,9 +353,11 @@ class TestSentiment(unittest.TestCase):
self.args.model_type = model_type
st_out = train(self.args, True)
dy_out = train(self.args, False)
self.assertTrue(np.allclose(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n {}".format(
dy_out, st_out))
np.testing.assert_allclose(dy_out,
st_out,
rtol=1e-05,
err_msg='dy_out:\n {}\n st_out:\n {}'.format(
dy_out, st_out))
def test_train(self):
model_types = ['cnn_net', 'bow_net', 'gru_net', 'bigru_net']
......
......@@ -139,9 +139,7 @@ class TestSliceWithoutControlFlow(unittest.TestCase):
def test_transformed_static_result(self):
static_res = self.run_static_mode()
dygraph_res = self.run_dygraph_mode()
self.assertTrue(np.allclose(dygraph_res, static_res),
msg='dygraph_res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
class TestSliceInIf(TestSliceWithoutControlFlow):
......@@ -283,11 +281,7 @@ class TestPaddleStridedSlice(unittest.TestCase):
strides=stride2)
array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]]
np.testing.assert_array_equal(
sl.numpy(),
array_slice,
err_msg='paddle.strided_slice:\n {} \n numpy slice:\n{}'.format(
sl.numpy(), array_slice))
np.testing.assert_array_equal(sl.numpy(), array_slice)
if __name__ == '__main__':
......
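
The strided-slice hunk above uses `np.testing.assert_array_equal` rather than `assert_allclose`: slicing must reproduce the input exactly, so no tolerance is involved. A small self-contained check of the same idea:

import numpy as np

arr = np.arange(6).reshape(2, 3)
# Exact element-wise equality; fails on any difference, however small.
np.testing.assert_array_equal(arr[:, ::2], arr[:, [0, 2]])
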
......@@ -14,7 +14,7 @@
from __future__ import print_function
import numpy
import numpy as np
import paddle
import unittest
......@@ -37,9 +37,7 @@ class TestTensorClone(unittest.TestCase):
def test_tensor_clone(self):
dygraph_res = self._run(to_static=False)
static_res = self._run(to_static=True)
self.assertTrue(numpy.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
@paddle.jit.to_static
......
......@@ -14,7 +14,7 @@
from __future__ import print_function
import numpy
import numpy as np
import unittest
import paddle
......@@ -39,7 +39,7 @@ def dyfunc_tensor_shape_2(x):
def dyfunc_tensor_shape_3(x):
# Transform y.shape but run y.shape actually because y is not Tensor
x = fluid.dygraph.to_variable(x)
y = numpy.ones(5)
y = np.ones(5)
res = fluid.layers.reshape(x, shape=y.shape)
return res
......@@ -200,7 +200,7 @@ def dyfunc_with_while_3(x):
def dyfunc_with_while_4(x):
x = paddle.to_tensor(x)
y = numpy.ones(5)
y = np.ones(5)
y_shape_0 = y.shape[0]
i = 1
......@@ -235,7 +235,7 @@ def dyfunc_dict_assign_shape():
class TestTensorShapeBasic(unittest.TestCase):
def setUp(self):
self.input = numpy.ones(5).astype("int32")
self.input = np.ones(5).astype("int32")
self.place = fluid.CUDAPlace(
0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
self._set_input_spec()
......@@ -265,9 +265,7 @@ class TestTensorShapeBasic(unittest.TestCase):
def test_transformed_static_result(self):
static_res = self.get_static_output()
dygraph_res = self.get_dygraph_output()
self.assertTrue(numpy.allclose(dygraph_res, static_res),
msg='dygraph res is {}\nstatic_res is {}'.format(
dygraph_res, static_res))
np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05)
def _set_expected_op_num(self):
self.expected_op_num = 2
......@@ -342,7 +340,7 @@ class TestTensorShapeBasic6(TestTensorShapeBasic):
class TestTupleShape1(TestTensorShapeBasic):
def init_test_func(self):
self.input = numpy.ones((5, 7)).astype("int32")
self.input = np.ones((5, 7)).astype("int32")
self.input_spec = [
paddle.static.InputSpec(shape=[-1, -1], dtype="int32")
]
......@@ -357,7 +355,7 @@ class TestTupleShape1(TestTensorShapeBasic):
class TestTupleShape2(TestTensorShapeBasic):
def init_test_func(self):
self.input = numpy.ones((5, 7)).astype("int32")
self.input = np.ones((5, 7)).astype("int32")
self.input_spec = [
paddle.static.InputSpec(shape=[-1, 7], dtype="int32")
]
......@@ -372,7 +370,7 @@ class TestTupleShape2(TestTensorShapeBasic):
class TestTupleShape3(TestTensorShapeBasic):
def init_test_func(self):
self.input = numpy.ones((5, 7)).astype("int32")
self.input = np.ones((5, 7)).astype("int32")
self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")]
self.dygraph_func = dyfunc_tuple_shape_3
......@@ -385,7 +383,7 @@ class TestTupleShape3(TestTensorShapeBasic):
class TestPaddleShapeApi(TestTensorShapeBasic):
def init_test_func(self):
self.input = numpy.ones((5, 7)).astype("int32")
self.input = np.ones((5, 7)).astype("int32")
self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")]
self.dygraph_func = dyfunc_paddle_shape_api
......@@ -597,7 +595,7 @@ class TestOpNumWithTensorShapeInWhile1(TestOpNumBasicWithTensorShape):
class TestChangeShapeAfterAssign(TestTensorShapeBasic):
def init_test_func(self):
self.input = numpy.ones((2, 3)).astype("int32")
self.input = np.ones((2, 3)).astype("int32")
self.input_spec = [
paddle.static.InputSpec(shape=[-1, 3], dtype="int32")
]
......
......@@ -395,19 +395,17 @@ class TestTransformer(unittest.TestCase):
args, batch_generator = self.prepare(mode='train')
static_avg_loss = train_static(args, batch_generator)
dygraph_avg_loss = train_dygraph(args, batch_generator)
self.assertTrue(np.allclose(static_avg_loss, dygraph_avg_loss))
np.testing.assert_allclose(static_avg_loss,
dygraph_avg_loss,
rtol=1e-05)
def _test_predict(self):
args, batch_generator = self.prepare(mode='test')
static_seq_ids, static_scores = predict_static(args, batch_generator)
dygraph_seq_ids, dygraph_scores = predict_dygraph(args, batch_generator)
self.assertTrue(np.allclose(static_seq_ids, static_seq_ids),
msg="static_seq_ids: {} \n dygraph_seq_ids: {}".format(
static_seq_ids, dygraph_seq_ids))
self.assertTrue(np.allclose(static_scores, dygraph_scores),
msg="static_scores: {} \n dygraph_scores: {}".format(
static_scores, dygraph_scores))
np.testing.assert_allclose(static_seq_ids, dygraph_seq_ids, rtol=1e-05)
np.testing.assert_allclose(static_scores, dygraph_scores, rtol=1e-05)
def test_check_result(self):
self._test_train()
......
......@@ -347,9 +347,7 @@ class TestTsm(unittest.TestCase):
fake_data_reader = FakeDataReader("train", parse_config(args.config))
dygraph_loss = train(args, fake_data_reader, to_static=False)
static_loss = train(args, fake_data_reader, to_static=True)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg="dygraph_loss: {} \nstatic_loss: {}".format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -101,7 +101,7 @@ class TestTyping(unittest.TestCase):
out = self.run_dy()
load_net = self.save_and_load('tuple')
load_out = load_net(self.x)
self.assertTrue(np.allclose(out, load_out))
np.testing.assert_allclose(out, load_out, rtol=1e-05)
class TestTypingTuple(TestTyping):
......
......@@ -301,9 +301,7 @@ class TestWord2Vec(unittest.TestCase):
def test_dygraph_static_same_loss(self):
dygraph_loss = train(to_static=False)
static_loss = train(to_static=True)
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg="dygraph_loss: {} \nstatic_loss: {}".format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -168,12 +168,10 @@ class TestYolov3(unittest.TestCase):
def test_dygraph_static_same_loss(self):
dygraph_loss = train(to_static=False)
static_loss = train(to_static=True)
self.assertTrue(np.allclose(dygraph_loss,
static_loss,
atol=1e-5,
rtol=1e-3),
msg="dygraph_loss: {} \nstatic_loss: {}".format(
dygraph_loss, static_loss))
np.testing.assert_allclose(dygraph_loss,
static_loss,
rtol=0.001,
atol=1e-05)
if __name__ == '__main__':
......
......@@ -112,12 +112,13 @@ class TestFft(unittest.TestCase):
"""Test fft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.fft(self.x, self.n, self.axis, self.norm),
paddle.fft.fft(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(scipy.fft.fft(self.x, self.n, self.axis,
self.norm),
paddle.fft.fft(paddle.to_tensor(self.x),
self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -138,13 +139,13 @@ class TestIfft(unittest.TestCase):
"""Test ifft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.ifft(self.x, self.n, self.axis,
self.norm),
paddle.fft.ifft(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(scipy.fft.ifft(self.x, self.n, self.axis,
self.norm),
paddle.fft.ifft(paddle.to_tensor(self.x),
self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -190,13 +191,13 @@ class TestFft2(unittest.TestCase):
"""Test fft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.fft2(self.x, self.n, self.axis,
self.norm),
paddle.fft.fft2(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(scipy.fft.fft2(self.x, self.n, self.axis,
self.norm),
paddle.fft.fft2(paddle.to_tensor(self.x),
self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -712,13 +713,13 @@ class TestRfft(unittest.TestCase):
"""Test rfft with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.rfft(self.x, self.n, self.axis,
self.norm),
paddle.fft.rfft(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(scipy.fft.rfft(self.x, self.n, self.axis,
self.norm),
paddle.fft.rfft(paddle.to_tensor(self.x),
self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -764,13 +765,12 @@ class TestRfft2(unittest.TestCase):
"""Test rfft2 with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.rfft2(self.x, self.n, self.axis,
self.norm),
paddle.fft.rfft2(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(
scipy.fft.rfft2(self.x, self.n, self.axis, self.norm),
paddle.fft.rfft2(paddle.to_tensor(self.x), self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -821,13 +821,12 @@ class TestRfftn(unittest.TestCase):
"""Test rfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.rfftn(self.x, self.n, self.axis,
self.norm),
paddle.fft.rfftn(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(
scipy.fft.rfftn(self.x, self.n, self.axis, self.norm),
paddle.fft.rfftn(paddle.to_tensor(self.x), self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -980,13 +979,12 @@ class TestIhfftn(unittest.TestCase):
"""Test ihfftn with norm condition
"""
with paddle.fluid.dygraph.guard(self.place):
self.assertTrue(
np.allclose(scipy.fft.ihfftn(self.x, self.n, self.axis,
self.norm),
paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n,
self.axis, self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype))))
np.testing.assert_allclose(
scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm),
paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n, self.axis,
self.norm),
rtol=RTOL.get(str(self.x.dtype)),
atol=ATOL.get(str(self.x.dtype)))
@place(DEVICES)
......
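
The FFT tests above look their tolerances up per input dtype through module-level `RTOL`/`ATOL` dicts. A minimal sketch of that scheme, with illustrative values (the real tables live in the test module):

import numpy as np

# Hypothetical per-dtype tolerances, keyed by str(dtype).
RTOL = {'float32': 1e-05, 'float64': 1e-07}
ATOL = {'float32': 1e-08, 'float64': 1e-12}

x = np.linspace(0, 1, 8, dtype='float64')
np.testing.assert_allclose(np.fft.ifft(np.fft.fft(x)).real,
                           x,
                           rtol=RTOL.get(str(x.dtype)),
                           atol=ATOL.get(str(x.dtype)))
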
......@@ -114,8 +114,10 @@ class TestBase(IPUOpTest):
res0 = self._test_base(False)
res1 = self._test_base(True)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1.flatten(),
rtol=1e-05,
atol=self.atol)
self.assertTrue(res0.shape == res1.shape)
......
......@@ -122,7 +122,7 @@ class TestBase(IPUD2STest):
def test_training(self):
cpu_loss = self._test(False).flatten()
ipu_loss = self._test(True).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-2))
np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=0.01)
if __name__ == "__main__":
......
......@@ -114,7 +114,7 @@ class TestBase(IPUD2STest):
def test_training(self):
ipu_loss = self._test(True).flatten()
cpu_loss = self._test(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4))
np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-4)
class TestSaveLoad(TestBase):
......
......@@ -120,7 +120,10 @@ class TestBase(IPUOpTest):
ipu_loss = self._test_optimizer(True).flatten()
cpu_loss = self._test_optimizer(False).flatten()
self.assertTrue(ipu_loss[0] == ipu_loss[99])
self.assertTrue(np.allclose(ipu_loss[100:], cpu_loss, atol=self.atol))
np.testing.assert_allclose(ipu_loss[100:],
cpu_loss,
rtol=1e-05,
atol=self.atol)
if __name__ == "__main__":
......
......@@ -93,7 +93,10 @@ class TestBase(IPUOpTest):
# none
cpu_res = self.feed['x']
self.assertTrue(np.allclose(ipu_res[0], cpu_res, atol=self.atol))
np.testing.assert_allclose(ipu_res[0],
cpu_res,
rtol=1e-05,
atol=self.atol)
def test_base(self):
# TODO: use string instead of int for reduction
......
......@@ -142,7 +142,7 @@ class TestBase(IPUOpTest):
cpu_res = self._test_load(False)
ipu_res = self._test_load(True)
self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol))
np.testing.assert_allclose(cpu_res, ipu_res, rtol=1e-05, atol=self.atol)
self.attrs['path'].cleanup()
......
......@@ -59,8 +59,10 @@ class TestIpuShard(unittest.TestCase):
def test_ipu_shard(self):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
class TestIpuPipeline(unittest.TestCase):
......@@ -102,8 +104,10 @@ class TestIpuPipeline(unittest.TestCase):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
if __name__ == "__main__":
......
......@@ -84,7 +84,7 @@ class TestConvNet(IPUOpTest):
ipu_loss = self.run_model(True).flatten()
cpu_loss = self.run_model(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-10))
np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-10)
if __name__ == "__main__":
......
......@@ -85,8 +85,10 @@ class TestBase(IPUOpTest):
def test_base(self):
res0 = self.run_model(False)
res1 = self.run_model(True)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1.flatten(),
rtol=1e-05,
atol=self.atol)
self.assertTrue(res0.shape == res1.shape)
......
......@@ -126,7 +126,10 @@ class TestBase(IPUOpTest):
cpu_outputs = self._test_base(False)
ipu_outputs = self._test_base(True)
self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol))
np.testing.assert_allclose(cpu_outputs,
ipu_outputs,
rtol=1e-05,
atol=self.atol)
class TestReplicaInference(TestBase):
......@@ -255,7 +258,10 @@ class TestReplicaTrain(TestTrainBase):
cpu_outputs = self._test_base(False)
ipu_outputs = self._test_base(True)[::2]
self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol))
np.testing.assert_allclose(cpu_outputs,
ipu_outputs,
rtol=1e-05,
atol=self.atol)
class TestReplicaCollectiveTrain(TestTrainBase):
......@@ -293,7 +299,10 @@ class TestReplicaCollectiveTrain(TestTrainBase):
cpu_outputs = self._test_base(False)
ipu_outputs = self._test_base(True)[::2]
self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol))
np.testing.assert_allclose(cpu_outputs,
ipu_outputs,
rtol=1e-05,
atol=self.atol)
class TestPipelineTrain(TestTrainBase):
......@@ -322,7 +331,10 @@ class TestPipelineTrain(TestTrainBase):
cpu_outputs = self._test_base(False)
ipu_outputs = self._test_base(True)[::3]
self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol))
np.testing.assert_allclose(cpu_outputs,
ipu_outputs,
rtol=1e-05,
atol=self.atol)
class TestAdamTrain(TestTrainBase):
......
......@@ -118,7 +118,10 @@ class TestBase(IPUOpTest):
ipu_loss = self._test_optimizer(True).flatten()
cpu_loss = self._test_optimizer(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol))
np.testing.assert_allclose(ipu_loss,
cpu_loss,
rtol=1e-05,
atol=self.atol)
@unittest.skip('do not support L2 regularization')
......
......@@ -172,7 +172,7 @@ class TestD2S(IPUD2STest):
def test_training(self):
ipu_loss = self._test(True).flatten()
cpu_loss = self._test(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4))
np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-4)
if __name__ == "__main__":
......
......@@ -112,8 +112,10 @@ class TestBase(IPUOpTest):
def test_base(self):
res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, True)
res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, False)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1.flatten(),
rtol=1e-05,
atol=self.atol)
self.attrs['model_path'].cleanup()
......@@ -188,8 +190,10 @@ class TestSGDFP16(TestBase):
def test_base(self):
res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, True)
res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, False)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1.flatten(),
rtol=1e-05,
atol=self.atol)
self.attrs['model_path'].cleanup()
......
......@@ -74,8 +74,10 @@ class TestSetIpuShard(unittest.TestCase):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
class TestSetIpuPipeline(unittest.TestCase):
......@@ -107,8 +109,10 @@ class TestSetIpuPipeline(unittest.TestCase):
ipu_index_list = self._test()
expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
class TestSetIpuShardAndPipeline(unittest.TestCase):
......@@ -147,8 +151,10 @@ class TestSetIpuShardAndPipeline(unittest.TestCase):
1, 1, 2, 3, 3, 3, 4, 4, 2, 2, 3, 4, 4, 4, 1, 1
]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
class TestSetIpuForModel(unittest.TestCase):
......@@ -182,8 +188,10 @@ class TestSetIpuForModel(unittest.TestCase):
1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2
]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
class TestSetIpuMixedModel(unittest.TestCase):
......@@ -234,8 +242,10 @@ class TestSetIpuMixedModel(unittest.TestCase):
1, 1, 2, 2, 2, 3, 4, 4, 2, 2, 3, 3, 3, 4, 1, 1
]
self.assertTrue(
np.allclose(ipu_index_list, expected_ipu_index_list, atol=0))
np.testing.assert_allclose(ipu_index_list,
expected_ipu_index_list,
rtol=1e-05,
atol=0)
if __name__ == "__main__":
......
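A side note on the TestSetIpu* hunks above: with atol=0 the allowed error is rtol * |expected|, at most about 4e-05 for these small integer index lists, so the converted check is effectively exact equality. A sketch using the expected list from the first hunk:

import numpy as np

ipu_index_list = np.array([1, 1, 2, 3, 3, 3, 4, 4])
expected_ipu_index_list = np.array([1, 1, 2, 3, 3, 3, 4, 4])

# Allowed error per element is 0 + 1e-05 * |expected|, i.e. at most 4e-05 here,
# so only identical integer lists pass; assert_array_equal would state the
# same intent more directly.
np.testing.assert_allclose(ipu_index_list,
                           expected_ipu_index_list,
                           rtol=1e-05,
                           atol=0)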
......@@ -95,8 +95,10 @@ class TestBase(IPUOpTest):
res0 = self._test_base(True)
res1 = self._test_base(False)
self.assertTrue(
np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1.flatten(),
rtol=1e-05,
atol=self.atol)
self.assertTrue(res0.shape == res1.shape)
......
......@@ -121,7 +121,10 @@ class TestBase(IPUOpTest):
ipu_loss = self._test_optimizer(True).flatten()
cpu_loss = self._test_optimizer(False).flatten()
self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol))
np.testing.assert_allclose(ipu_loss,
cpu_loss,
rtol=1e-05,
atol=self.atol)
if __name__ == "__main__":
......
......@@ -102,8 +102,10 @@ class TestWeightSharing(IPUOpTest):
res0 = self.run_model(False)
res1 = self.run_model(True)
self.assertTrue(
np.allclose(res0.flatten(), res1[0].flatten(), atol=self.atol))
np.testing.assert_allclose(res0.flatten(),
res1[0].flatten(),
rtol=1e-05,
atol=self.atol)
if __name__ == "__main__":
......
......@@ -136,9 +136,12 @@ class AutoScanTest(unittest.TestCase):
"The output shapes are not equal, the baseline shape is " +
str(baseline[key].shape) + ', but got ' + str(arr.shape))
diff = abs(baseline[key] - arr)
self.assertTrue(
np.allclose(baseline[key], arr, atol=atol, rtol=rtol),
"Output has diff, Maximum absolute error: {}".format(
np.testing.assert_allclose(
baseline[key],
arr,
rtol=rtol,
atol=atol,
err_msg='Output has diff, Maximum absolute error: {}'.format(
np.amax(diff)))
@abc.abstractmethod
......
......@@ -192,9 +192,13 @@ class InferencePassTest(unittest.TestCase):
paddle_out = paddle_out.flatten()
inference_out = inference_out.flatten()
self.assertTrue(
np.allclose(paddle_out, inference_out, atol=atol),
"Output has diff between inference and training forward at {} ".
np.testing.assert_allclose(
paddle_out,
inference_out,
rtol=1e-05,
atol=atol,
err_msg=
'Output has diff between inference and training forward at {} '.
format(device))
# Check whether the trt results and the GPU results are the same.
......@@ -220,12 +224,12 @@ class InferencePassTest(unittest.TestCase):
paddle_out = paddle_out.flatten()
tensorrt_output = tensorrt_output.flatten()
self.assertTrue(
np.allclose(paddle_out,
tensorrt_output,
rtol=rtol,
atol=atol),
"Output has diff between GPU and TensorRT. ")
np.testing.assert_allclose(
paddle_out,
tensorrt_output,
rtol=rtol,
atol=atol,
err_msg='Output has diff between GPU and TensorRT. ')
# Check whether the mkldnn results and the CPU results are the same.
if (not use_gpu) and self.enable_mkldnn:
......@@ -240,9 +244,12 @@ class InferencePassTest(unittest.TestCase):
if self.enable_mkldnn_bfloat16:
atol = 0.01
for paddle_out, mkldnn_output in zip(paddle_outs, mkldnn_outputs):
self.assertTrue(
np.allclose(np.array(paddle_out), mkldnn_output, atol=atol),
"Output has diff between CPU and MKLDNN. ")
np.testing.assert_allclose(
np.array(paddle_out),
mkldnn_output,
rtol=1e-05,
atol=atol,
err_msg='Output has diff between CPU and MKLDNN. ')
class TensorRTParam:
'''
......
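For reference while reading the tolerance changes throughout this diff: both np.allclose and np.testing.assert_allclose apply the elementwise check |actual - desired| <= atol + rtol * |desired|, so the rtol=1e-05 added everywhere scales with the magnitude of the reference output. A small illustration with invented values:

import numpy as np

desired = np.array([1000.0, 0.001])
actual = desired + np.array([5e-3, 5e-9])

# Allowed error per element is atol + rtol * |desired| (atol defaults to 0):
#   element 0: 1e-05 * 1000.0 = 1e-02 >= 5e-03 -> ok
#   element 1: 1e-05 * 0.001  = 1e-08 >= 5e-09 -> ok
np.testing.assert_allclose(actual, desired, rtol=1e-05)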
......@@ -290,9 +290,13 @@ class QuantDequantTest(unittest.TestCase):
paddle_out = paddle_out.flatten()
inference_out = inference_out.flatten()
self.assertTrue(
np.allclose(paddle_out, inference_out, atol=atol),
"Output has diff between inference and training forward at {} ".
np.testing.assert_allclose(
paddle_out,
inference_out,
rtol=1e-05,
atol=atol,
err_msg=
'Output has diff between inference and training forward at {} '.
format(device))
# Check whether the trt results and the GPU results are the same.
......@@ -319,12 +323,12 @@ class QuantDequantTest(unittest.TestCase):
paddle_out = paddle_out.flatten()
tensorrt_output = tensorrt_output.flatten()
self.assertTrue(
np.allclose(paddle_out,
tensorrt_output,
rtol=rtol,
atol=atol),
"Output has diff between GPU and TensorRT. ")
np.testing.assert_allclose(
paddle_out,
tensorrt_output,
rtol=rtol,
atol=atol,
err_msg='Output has diff between GPU and TensorRT. ')
# Check whether the mkldnn results and the CPU results are the same.
if (not use_gpu) and self.enable_mkldnn:
......@@ -339,9 +343,12 @@ class QuantDequantTest(unittest.TestCase):
if self.enable_mkldnn_bfloat16:
atol = 0.01
for paddle_out, mkldnn_output in zip(paddle_outs, mkldnn_outputs):
self.assertTrue(
np.allclose(np.array(paddle_out), mkldnn_output, atol=atol),
"Output has diff between CPU and MKLDNN. ")
np.testing.assert_allclose(
np.array(paddle_out),
mkldnn_output,
rtol=1e-05,
atol=atol,
err_msg='Output has diff between CPU and MKLDNN. ')
class TensorRTParam:
'''
......
......@@ -163,9 +163,12 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest):
"The output shapes are not equal, the baseline shape is " +
str(basline_arr.shape) + ', but got ' + str(arr.shape))
diff = abs(basline_arr - arr)
self.assertTrue(
np.allclose(basline_arr, arr, atol=atol, rtol=rtol),
"Output has diff, Maximum absolute error: {}".format(
np.testing.assert_allclose(
basline_arr,
arr,
rtol=rtol,
atol=atol,
err_msg='Output has diff, Maximum absolute error: {}'.format(
np.amax(diff)))
def assert_op_size(self, trt_engine_num, paddle_op_num):
......
......@@ -62,4 +62,7 @@ class TestFuseResNetUnit(unittest.TestCase):
feed = {"x": np.random.randn(1, 64, 64, 8).astype("float16")}
before_out = exe.run(program, feed=feed, fetch_list=[out.name])
after_out = exe.run(after_program, feed=feed, fetch_list=[out.name])
self.assertTrue(np.allclose(before_out[0], after_out[0], atol=5e-3))
np.testing.assert_allclose(before_out[0],
after_out[0],
rtol=1e-05,
atol=0.005)
......@@ -291,7 +291,7 @@ class TestGeneratePass(unittest.TestCase):
after_out = executor.run(after_program,
feed=feed,
fetch_list=[out.name])
self.assertTrue(np.allclose(before_out, after_out))
np.testing.assert_allclose(before_out, after_out, rtol=1e-05)
def test_multi_add_to_sum(self):
paddle.enable_static()
......@@ -327,8 +327,8 @@ class TestGeneratePass(unittest.TestCase):
after_out1, after_out2 = executor.run(after_program,
feed=feed,
fetch_list=[out1.name, out2.name])
self.assertTrue(np.allclose(before_out1, after_out1))
self.assertTrue(np.allclose(before_out2, after_out2))
np.testing.assert_allclose(before_out1, after_out1, rtol=1e-05)
np.testing.assert_allclose(before_out2, after_out2, rtol=1e-05)
def test_generate_combine_mul_v2(self):
helper = ir.RegisterPassHelper([generate_combine_mul_v2()])
......@@ -369,7 +369,7 @@ class TestGeneratePass(unittest.TestCase):
after_out = executor.run(after_program,
feed=feed,
fetch_list=[out.name])
self.assertTrue(np.allclose(before_out, after_out))
np.testing.assert_allclose(before_out, after_out, rtol=1e-05)
def test_generate_simplify_inference(self):
self.check_generate_simplify_inference("generate_simplify_inference_v1")
......@@ -405,4 +405,4 @@ class TestGeneratePass(unittest.TestCase):
after_out = executor.run(after_program,
feed=feed,
fetch_list=[out.name])
self.assertTrue(np.allclose(before_out, after_out))
np.testing.assert_allclose(before_out, after_out, rtol=1e-05)
......@@ -38,8 +38,11 @@ _set_use_system_allocator(True)
class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp):
def __assert_close(self, tensor, np_array, msg, rtol=2e-02, atol=2):
self.assertTrue(
np.allclose(np.array(tensor), np_array, rtol=rtol, atol=atol), msg)
np.testing.assert_allclose(np.array(tensor),
np_array,
rtol=rtol,
atol=atol,
err_msg=msg)
def check_forward(self,
shape,
......
......@@ -57,7 +57,11 @@ class TestLayerNormMKLDNNOp(unittest.TestCase):
self.use_mkldnn = True
def __assert_close(self, tensor, np_array, msg, atol=1e-4):
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
np.testing.assert_allclose(np.array(tensor),
np_array,
rtol=1e-05,
atol=atol,
err_msg=msg)
def check_forward(self,
shape,
......
......@@ -360,8 +360,10 @@ class TestReQuantizeOpReused(TestReQuantizeOp):
feed={'input': variables['input']},
fetch_list=['output'])
self.assertTrue(np.allclose(variables['output'], out[0], atol=1e-4),
'output')
np.testing.assert_allclose(variables['output'],
out[0],
rtol=1e-05,
atol=1e-4)
# ---------------test reused requantize op, no shift------------------------
......
......@@ -80,9 +80,13 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase):
sum_op.run(scope, place)
out = scope.find_var("x0").get_tensor()
out_array = np.array(out)
self.assertTrue(
np.allclose(expected_out, out_array, atol=1e-5),
"Inplace sum_mkldnn_op output has diff with expected output")
np.testing.assert_allclose(
expected_out,
out_array,
rtol=1e-05,
atol=1e-05,
err_msg='Inplace sum_mkldnn_op output has diff with expected output'
)
def test_check_grad(self):
pass
......
......@@ -345,8 +345,9 @@ class TestArgMaxAPI(unittest.TestCase):
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis)
paddle_output = paddle.argmax(tensor_input, axis=self.axis)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
paddle.enable_static()
for place in self.place:
......@@ -378,8 +379,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
paddle_output = paddle.argmax(tensor_input,
axis=self.axis,
keepdim=self.keep_dims)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
......@@ -407,8 +409,9 @@ class TestArgMaxAPI_3(unittest.TestCase):
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input).reshape([1])
paddle_output = paddle.argmax(tensor_input)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
......
......@@ -199,8 +199,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
channel_first_x = paddle.transpose(x, [0, 2, 1])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 1])
self.assertEqual(
np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
np.testing.assert_allclose(y1.numpy(),
y2.numpy(),
rtol=1e-05,
atol=1e-07)
def test_2d(self):
for p in self.places:
......@@ -214,8 +216,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
channel_first_x = paddle.transpose(x, [0, 3, 1, 2])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 3, 1])
self.assertEqual(
np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
np.testing.assert_allclose(y1.numpy(),
y2.numpy(),
rtol=1e-05,
atol=1e-07)
def test_3d(self):
for p in self.places:
......@@ -229,8 +233,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
channel_first_x = paddle.transpose(x, [0, 4, 1, 2, 3])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 3, 4, 1])
self.assertEqual(
np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
np.testing.assert_allclose(y1.numpy(),
y2.numpy(),
rtol=1e-05,
atol=1e-07)
# res = np.allclose(y1.numpy(), y2.numpy())
# if res == False:
# np.savetxt("./y1.txt", y1.numpy().flatten(), fmt='%.10f', delimiter='\n')
......@@ -270,7 +276,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase):
net2.training = False
y1 = net1(x)
y2 = net2(x)
self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True)
np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-05)
class TestBatchNormUseGlobalStatsCase1(TestBatchNormUseGlobalStats):
......
......@@ -65,12 +65,7 @@ class TestGaussianRandomOp(OpTest):
hist2, _ = np.histogram(data, range=(-3, 5))
hist2 = hist2.astype("float32")
hist2 /= float(outs[0].size)
np.testing.assert_allclose(hist,
hist2,
rtol=0,
atol=0.01,
err_msg="hist: " + str(hist) + " hist2: " +
str(hist2))
np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
class TestMeanStdAreInt(TestGaussianRandomOp):
......
......@@ -511,10 +511,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
place=place, use_amp=use_amp, use_multi_tensor=True)
output2, params2 = self._momentum_optimize_dygraph(
place=place, use_amp=use_amp, use_multi_tensor=False)
self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
self.assertEqual(
np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
def _check_with_param_arrt(self, place, use_amp):
output1, params1 = self._momentum_optimize_dygraph(
......@@ -527,10 +526,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
use_amp=use_amp,
use_param_attr=True,
use_multi_tensor=False)
self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
self.assertEqual(
np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
def _check_with_param_group(self, place, use_amp):
output1, params1 = self._momentum_optimize_dygraph(
......@@ -543,10 +541,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
use_amp=use_amp,
use_param_group=True,
use_multi_tensor=False)
self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
self.assertEqual(
np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
def test_main(self):
for place in self._get_places():
......@@ -619,8 +616,7 @@ class TestMultiTensorMomentumStatic(unittest.TestCase):
use_amp=use_amp,
use_multi_tensor=False)
for idx in range(len(output1)):
self.assertEqual(
np.allclose(output1[idx], output2[idx], rtol=1e-05), True)
np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05)
def test_main(self):
for place in self._get_places():
......
......@@ -149,7 +149,7 @@ class TestSoftmaxAPI(unittest.TestCase):
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_check(self):
paddle.disable_static(self.place)
......@@ -161,7 +161,7 @@ class TestSoftmaxAPI(unittest.TestCase):
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
out1 = self.softmax(x, axis=0)
x = paddle.to_tensor(self.x_np)
......@@ -169,11 +169,11 @@ class TestSoftmaxAPI(unittest.TestCase):
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
out = self.softmax(x, dtype=np.float32)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
......
......@@ -155,10 +155,12 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase):
x = paddle.to_tensor(data)
bn_out = bn_model(x)
sybn_out = sybn_model(x)
self.assertTrue(
np.allclose(bn_out.numpy(), sybn_out.numpy()),
"Output has diff. \n" + "\nBN " + str(bn_out.numpy()) +
"\n" + "Sync BN " + str(sybn_out.numpy()))
np.testing.assert_allclose(
bn_out.numpy(),
sybn_out.numpy(),
rtol=1e-05,
err_msg='Output has diff. \n' + '\nBN ' +
str(bn_out.numpy()) + '\n' + 'Sync BN ' + str(sybn_out.numpy()))
class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase):
......
......@@ -71,11 +71,7 @@ class TestMLUUniformRandomOp(OpTest):
def verify_output(self, outs):
hist, prob = self.output_hist(np.array(outs[0]))
np.testing.assert_allclose(hist,
prob,
rtol=0,
atol=0.01,
err_msg="hist: " + str(hist))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
......@@ -103,11 +99,7 @@ class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
np.testing.assert_allclose(hist,
prob,
rtol=0,
atol=0.01,
err_msg="hist: " + str(hist))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
if __name__ == "__main__":
......
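The histogram checks in the uniform_random hunks above keep rtol=0, which disables the relative term entirely; the comparison is purely |hist - prob| <= 0.01, a sensible shape for empirical frequencies against probabilities. A minimal sketch with made-up numbers:

import numpy as np

hist = np.array([0.099, 0.101, 0.100])
prob = np.full(3, 0.1)

# rtol=0 leaves only the absolute term, so each bin may deviate by at most 0.01.
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)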
......@@ -313,8 +313,9 @@ class TestArgMaxAPI(unittest.TestCase):
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis)
paddle_output = paddle.argmax(tensor_input, axis=self.axis)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
paddle.enable_static()
for place in self.place:
......@@ -346,8 +347,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
paddle_output = paddle.argmax(tensor_input,
axis=self.axis,
keepdim=self.keep_dims)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
......@@ -375,8 +377,9 @@ class TestArgMaxAPI_3(unittest.TestCase):
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input).reshape([1])
paddle_output = paddle.argmax(tensor_input)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
......
......@@ -254,8 +254,9 @@ class TestArgMinAPI(unittest.TestCase):
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmin(numpy_input, axis=self.axis)
paddle_output = paddle.argmin(tensor_input, axis=self.axis)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
paddle.enable_static()
for place in self.place:
......@@ -287,8 +288,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
paddle_output = paddle.argmin(tensor_input,
axis=self.axis,
keepdim=self.keep_dims)
self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
True)
np.testing.assert_allclose(numpy_output,
paddle_output.numpy(),
rtol=1e-05)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
......
......@@ -99,10 +99,7 @@ class TestAssignApi(unittest.TestCase):
exe = fluid.Executor(self.place)
[fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
np.testing.assert_allclose(fetched_x,
self.value,
err_msg="fetch_x=%s val=%s" %
(fetched_x, self.value))
np.testing.assert_allclose(fetched_x, self.value)
self.assertEqual(fetched_x.dtype, self.value.dtype)
......
......@@ -178,7 +178,7 @@ class TestRemainderOp(unittest.TestCase):
y = paddle.to_tensor(np_y)
z = x % y
z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
self.assertEqual(np.allclose(z_expected, z.numpy()), True)
np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05)
np_x = np.array([-3, 11, -2, 3])
np_y = np.array([-1, 2, 3, -2])
......@@ -186,7 +186,7 @@ class TestRemainderOp(unittest.TestCase):
y = paddle.to_tensor(np_y, dtype="int64")
z = x % y
z_expected = np.array([0, 1, 1, -1])
self.assertEqual(np.allclose(z_expected, z.numpy()), True)
np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05)
if __name__ == '__main__':
......
......@@ -77,12 +77,7 @@ class TestMultinomialOp(OpTest):
# normalize the input to get the probability
prob = self.input_np / self.input_np.sum(axis=-1, keepdims=True)
sample_prob = self.sample_output(np.array(outs[0]))
np.testing.assert_allclose(sample_prob,
prob,
rtol=0,
atol=0.01,
err_msg="sample_prob: " + str(sample_prob) +
"\nprob: " + str(prob))
np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
class TestMultinomialOp2(TestMultinomialOp):
......@@ -125,12 +120,7 @@ class TestMultinomialApi(unittest.TestCase):
sample_prob = sample_output_one_dimension(out.numpy(), 4)
prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
np.testing.assert_allclose(sample_prob,
prob,
rtol=0,
atol=0.01,
err_msg="sample_prob: " + str(sample_prob) +
"\nprob: " + str(prob))
np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
paddle.enable_static()
def test_dygraph2(self):
......@@ -143,12 +133,7 @@ class TestMultinomialApi(unittest.TestCase):
sample_prob = sample_output_two_dimension(out.numpy(), [3, 4])
prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
np.testing.assert_allclose(sample_prob,
prob,
rtol=0,
atol=0.01,
err_msg="sample_prob: " + str(sample_prob) +
"\nprob: " + str(prob))
np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
paddle.enable_static()
def test_dygraph3(self):
......@@ -191,12 +176,7 @@ class TestMultinomialApi(unittest.TestCase):
sample_prob = sample_output_one_dimension(out, 4)
prob = x_np / x_np.sum(axis=-1, keepdims=True)
np.testing.assert_allclose(sample_prob,
prob,
rtol=0,
atol=0.01,
err_msg="sample_prob: " + str(sample_prob) +
"\nprob: " + str(prob))
np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
class TestMultinomialAlias(unittest.TestCase):
......
......@@ -105,7 +105,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase):
out_ref = np.array(
np.take_along_axis(self.x_np, self.index_np, self.axis))
for out in res:
self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True)
np.testing.assert_allclose(out, out_ref, rtol=0.001)
def test_api_dygraph(self):
paddle.disable_static(self.place)
......@@ -114,7 +114,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase):
out = paddle.take_along_axis(x_tensor, self.index, self.axis)
out_ref = np.array(
np.take_along_axis(self.x_np, self.index_np, self.axis))
self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), True)
np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001)
paddle.enable_static()
......
......@@ -71,11 +71,7 @@ class TestNPUUniformRandomOp(OpTest):
def verify_output(self, outs):
hist, prob = self.output_hist(np.array(outs[0]))
np.testing.assert_allclose(hist,
prob,
rtol=0,
atol=0.01,
err_msg="hist: " + str(hist))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
......@@ -103,11 +99,7 @@ class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
np.testing.assert_allclose(hist,
prob,
rtol=0,
atol=0.01,
err_msg="hist: " + str(hist))
np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
if __name__ == "__main__":
......
......@@ -728,11 +728,14 @@ class OpTest(unittest.TestCase):
for name in api_outs:
np_api = np.array(api_outs[name])
np_dyg = np.array(dygraph_outs[name])
self.assertTrue(
np.allclose(np_api, np_dyg, equal_nan=False),
"Output (" + name + ") has diff at " + str(place) +
"\nExpect " + str(np_dyg) + "\n" + "But Got" + str(np_api) +
" in class " + self.__class__.__name__)
np.testing.assert_allclose(
np_api,
np_dyg,
rtol=1e-05,
equal_nan=False,
err_msg='Output (' + name + ') has diff at ' + str(place) +
'\nExpect ' + str(np_dyg) + '\n' + 'But Got' + str(np_api) +
' in class ' + self.__class__.__name__)
def _calc_python_api_output(self, place, egr_inps=None, egr_oups=None):
""" set egr_inps and egr_oups = None if you want to create it by yourself.
......@@ -1041,12 +1044,15 @@ class OpTest(unittest.TestCase):
expect_out = np.array(expect_outs[i])
actual_out = np.array(actual_outs[i])
if inplace_atol is not None:
self.assertTrue(
np.allclose(expect_out, actual_out, atol=inplace_atol),
"Output (" + name + ") has diff at " + str(place) +
" when using and not using inplace" + "\nExpect " +
str(expect_out) + "\n" + "But Got" + str(actual_out) +
" in class " + self.__class__.__name__)
np.testing.assert_allclose(
expect_out,
actual_out,
rtol=1e-05,
atol=inplace_atol,
err_msg='Output (' + name + ') has diff at ' + str(place) +
' when using and not using inplace' + '\nExpect ' +
str(expect_out) + '\n' + 'But Got' + str(actual_out) +
' in class ' + self.__class__.__name__)
else:
np.testing.assert_array_equal(
expect_out,
......
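Two details of the OpTest hunks above are easy to miss. First, np.allclose defaults to equal_nan=False while np.testing.assert_allclose defaults to equal_nan=True, so the converted call keeps equal_nan=False spelled out rather than relying on the new default. Second, the hand-built err_msg strings (several were dropped elsewhere in this commit) are optional: the assertion's own failure report already lists the mismatched elements and the maximum absolute and relative differences, and err_msg is merely appended to it. A short sketch of the NaN behaviour:

import numpy as np

a = np.array([1.0, np.nan])

# Passes: assert_allclose treats NaN == NaN by default (equal_nan=True).
np.testing.assert_allclose(a, a)

# np.allclose has the opposite default, which is why the converted OpTest call
# passes equal_nan=False explicitly to keep the original semantics.
assert not np.allclose(a, a)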