Unverified commit 9107b653, authored by Nyakku Shigure, committed by GitHub

[CodeStyle] use np.testing.assert_allclose instead of self.assertTrue(np.allclose(...)) (part 2) (#45213)

* autofix (get ci log)

* retrigger ci

* fix test_gather_nd_op, wrong expected output in dygraph

* fix test_activation_op, unpack static graph result

* fix test_auc_op, unpack static graph result

* fix test_bce_loss, unpack static graph result

* fix test_bce_with_logits_loss, unpack static graph result

* fix test_cond, unpack static graph result

* fix test_dygraph_weight_norm, wrong numpy reference when `axis=None`

* fix test_einsum, wrong matmul inputs

* fix test_elementwise_heaviside_op, unpack static graph result

* fix test_frac_api, unpack static graph result

* skip test_group_norm_op_v2, probably the wrong numpy reference

* fix test_imperative_double_grad, wrong subscript

* skip test_imperative_tensor_clear_gradient, ???

* skip test_layer_norm_op, probably the wrong numpy reference

* fix test_math_op_patch, unpack static graph results

* fix test_masked_select_op, unpack static graph results

* fix test_mse_loss, unpack static graph results

* fix test_multi_label_soft_margin_loss, unpack static graph results

* fix test_multi_dot_op, unpack static graph results

* fix test_nll_loss, unpack static graph results

* fix test_normalization_wrapper, unpack static graph results

* fix test_pass_builder, unpack static graph results

* fix test_prelu_op, possibly an extra comma

* fix test_psroi_pool_op, unpack static graph results

* fix test_queue, unpack static graph results

* fix test_reorder_lod_tensor, compare an item with a list

* fix test_rrelu_op, unpack static graph results

* fix test_searchsorted_op, unpack static graph results

* fix test_sigmoid_focal_loss, unpack static graph results

* fix test_smooth_l1_loss, unpack static graph results

* fix test_soft_margin_loss, unpack static graph results

* fix test_softmax2d, unpack static graph results

* fix test_square_error_cost, unpack static graph results

* fix test_tril_indices_op, unpack static graph results

* fix test_unsqueeze_op, mismatched numpy reference (axis)

* skip test_layers, `static_rlt` is missing an axis

* fix test_mnist, unpack PredictorTools result (also a list)

* fix test_build_strategy, unpack PredictorTools result

* fix test_mobile_net, unpack PredictorTools result

* fix test_resnet_v2, unpack PredictorTools result

* revert some changes

revert test_layers

revert test_group_norm_op_v2

revert test_layer_norm_op

revert test_imperative_tensor_clear_gradient

* fix test_normal, use flatten instead of reshape, (PR-CI-Windows-OPENBLAS)

* empty commit, trigger CI
Parent 9556c688
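The mechanical change is the same across all of the files below: replace the bare boolean assert with np.testing.assert_allclose, and unpack single-element fetch lists before comparing, because the new assert checks shapes instead of broadcasting. A minimal standalone sketch of both patterns (plain NumPy and unittest only, no Paddle required; `fetched` is a stand-in for an `exe.run(..., fetch_list=[out])` result):

import unittest

import numpy as np


class MigrationSketch(unittest.TestCase):

    def test_allclose_migration(self):
        actual = np.array([1.0, 2.0, 3.0])
        expected = np.array([1.0, 2.0, 3.0000001])

        # Before: on failure this reports nothing but "False is not true".
        self.assertTrue(np.allclose(actual, expected))

        # After: on failure this prints the mismatching elements and the
        # max absolute/relative error. rtol=1e-05 is spelled out because
        # np.allclose defaults to rtol=1e-05 while assert_allclose
        # defaults to the stricter rtol=1e-07.
        np.testing.assert_allclose(actual, expected, rtol=1e-05)

    def test_unpack_fetch_list(self):
        expected = np.array([1.0, 2.0, 3.0])
        fetched = [expected.copy()]  # stand-in for exe.run(..., fetch_list=[out])

        # np.allclose(fetched, expected) passes by broadcasting the
        # one-element list against (3,); assert_allclose rejects the
        # shape mismatch, hence the `out, = output()` unpacking seen
        # throughout the diff.
        out, = fetched
        np.testing.assert_allclose(out, expected, rtol=1e-05)


if __name__ == '__main__':
    unittest.main()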
@@ -45,21 +45,32 @@ class TestResnetWithPass(unittest.TestCase):
         st_pre = self.resnet_helper.predict_static(image)
         dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
         predictor_pre = self.resnet_helper.predict_analysis_inference(image)
-        self.assertTrue(np.allclose(dy_pre, st_pre),
-                        msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_pre, st_pre))
-        self.assertTrue(np.allclose(dy_jit_pre, st_pre),
-                        msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_jit_pre, st_pre))
-        self.assertTrue(np.allclose(predictor_pre, st_pre),
-                        msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
-                            predictor_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_jit_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
+                dy_jit_pre, st_pre))
+        np.testing.assert_allclose(
+            predictor_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
+                predictor_pre, st_pre))

     def test_resnet(self):
         static_loss = self.train(to_static=True)
         dygraph_loss = self.train(to_static=False)
-        self.assertTrue(np.allclose(static_loss, dygraph_loss),
-                        msg="static_loss: {} \n dygraph_loss: {}".format(
-                            static_loss, dygraph_loss))
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-05,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss))
         self.verify_predict()
......
@@ -165,14 +165,20 @@ class TestMNISTWithToStatic(TestMNIST):
     def test_mnist_to_static(self):
         dygraph_loss = self.train_dygraph()
         static_loss = self.train_static()
-        self.assertTrue(np.allclose(dygraph_loss, static_loss),
-                        msg='dygraph is {}\n static_res is \n{}'.format(
-                            dygraph_loss, static_loss))
+        np.testing.assert_allclose(
+            dygraph_loss,
+            static_loss,
+            rtol=1e-05,
+            err_msg='dygraph is {}\n static_res is \n{}'.format(
+                dygraph_loss, static_loss))
         with _test_eager_guard():
             dygraph_loss = self.train_dygraph()
             static_loss = self.train_static()
-            self.assertTrue(np.allclose(dygraph_loss, static_loss),
-                            msg='dygraph is {}\n static_res is \n{}'.format(
-                                dygraph_loss, static_loss))
+            np.testing.assert_allclose(
+                dygraph_loss,
+                static_loss,
+                rtol=1e-05,
+                err_msg='dygraph is {}\n static_res is \n{}'.format(
+                    dygraph_loss, static_loss))

     def test_mnist_declarative_cpu_vs_mkldnn(self):
@@ -182,8 +188,11 @@ class TestMNISTWithToStatic(TestMNIST):
             dygraph_loss_mkldnn = self.train_dygraph()
         finally:
             fluid.set_flags({'FLAGS_use_mkldnn': False})
-        self.assertTrue(np.allclose(dygraph_loss_cpu, dygraph_loss_mkldnn),
-                        msg='cpu dygraph is {}\n mkldnn dygraph is \n{}'.format(
-                            dygraph_loss_cpu, dygraph_loss_mkldnn))
+        np.testing.assert_allclose(
+            dygraph_loss_cpu,
+            dygraph_loss_mkldnn,
+            rtol=1e-05,
+            err_msg='cpu dygraph is {}\n mkldnn dygraph is \n{}'.format(
+                dygraph_loss_cpu, dygraph_loss_mkldnn))

     def train(self, to_static=False):
@@ -250,15 +259,21 @@ class TestMNISTWithToStatic(TestMNIST):
         # load in static mode
         static_infer_out = self.jit_load_and_run_inference_static(
             model_save_dir, model_filename, params_filename, inputs)
-        self.assertTrue(np.allclose(gt_out.numpy(), static_infer_out))
+        np.testing.assert_allclose(gt_out.numpy(),
+                                   static_infer_out,
+                                   rtol=1e-05)

         # load in dygraph mode
         dygraph_infer_out = self.jit_load_and_run_inference_dygraph(
             model_save_prefix, inputs)
-        self.assertTrue(np.allclose(gt_out.numpy(), dygraph_infer_out))
+        np.testing.assert_allclose(gt_out.numpy(),
+                                   dygraph_infer_out,
+                                   rtol=1e-05)

         # load in Paddle-Inference
         predictor_infer_out = self.predictor_load_and_run_inference_analysis(
             model_save_dir, model_filename, params_filename, inputs)
-        self.assertTrue(np.allclose(gt_out.numpy(), predictor_infer_out))
+        np.testing.assert_allclose(gt_out.numpy(),
+                                   predictor_infer_out,
+                                   rtol=1e-05)

     @switch_to_static_graph
     def jit_load_and_run_inference_static(self, model_path, model_filename,
@@ -287,7 +302,7 @@ class TestMNISTWithToStatic(TestMNIST):
                                           params_filename, inputs):
         output = PredictorTools(model_path, model_filename, params_filename,
                                 inputs)
-        out = output()
+        out, = output()
         return out
......
@@ -556,7 +556,7 @@ def predict_dygraph_jit(args, data):
 def predict_analysis_inference(args, data):
     output = PredictorTools(args.model_save_dir, args.model_filename,
                             args.params_filename, [data])
-    out = output()
+    out, = output()
     return out
@@ -585,8 +585,11 @@ class TestMobileNet(unittest.TestCase):
     def assert_same_loss(self, model_name):
         dy_out = self.train(model_name, to_static=False)
         st_out = self.train(model_name, to_static=True)
-        self.assertTrue(np.allclose(dy_out, st_out),
-                        msg="dy_out: {}, st_out: {}".format(dy_out, st_out))
+        np.testing.assert_allclose(dy_out,
+                                   st_out,
+                                   rtol=1e-05,
+                                   err_msg='dy_out: {}, st_out: {}'.format(
+                                       dy_out, st_out))

     def assert_same_predict(self, model_name):
         self.args.model = model_name
@@ -602,14 +605,23 @@ class TestMobileNet(unittest.TestCase):
         st_pre = predict_static(self.args, image)
         dy_jit_pre = predict_dygraph_jit(self.args, image)
         predictor_pre = predict_analysis_inference(self.args, image)
-        self.assertTrue(np.allclose(dy_pre, st_pre),
-                        msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_pre, st_pre))
-        self.assertTrue(np.allclose(dy_jit_pre, st_pre),
-                        msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_jit_pre, st_pre))
-        self.assertTrue(np.allclose(predictor_pre, st_pre, atol=1e-5),
-                        msg="inference_pred_res:\n {}\n, st_pre: \n{}.".format(
-                            predictor_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_jit_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
+                dy_jit_pre, st_pre))
+        np.testing.assert_allclose(
+            predictor_pre,
+            st_pre,
+            rtol=1e-05,
+            atol=1e-05,
+            err_msg='inference_pred_res:\n {}\n, st_pre: \n{}.'.format(
+                predictor_pre, st_pre))

     def test_mobile_net(self):
......
@@ -339,7 +339,7 @@ class ResNetHelper:
     def predict_analysis_inference(self, data):
         output = PredictorTools(self.model_save_dir, self.model_filename,
                                 self.params_filename, [data])
-        out = output()
+        out, = output()
         return out
@@ -358,21 +358,32 @@ class TestResnet(unittest.TestCase):
         st_pre = self.resnet_helper.predict_static(image)
         dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
         predictor_pre = self.resnet_helper.predict_analysis_inference(image)
-        self.assertTrue(np.allclose(dy_pre, st_pre),
-                        msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_pre, st_pre))
-        self.assertTrue(np.allclose(dy_jit_pre, st_pre),
-                        msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_jit_pre, st_pre))
-        self.assertTrue(np.allclose(predictor_pre, st_pre),
-                        msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
-                            predictor_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_jit_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
+                dy_jit_pre, st_pre))
+        np.testing.assert_allclose(
+            predictor_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
+                predictor_pre, st_pre))

     def test_resnet(self):
         static_loss = self.train(to_static=True)
         dygraph_loss = self.train(to_static=False)
-        self.assertTrue(np.allclose(static_loss, dygraph_loss),
-                        msg="static_loss: {} \n dygraph_loss: {}".format(
-                            static_loss, dygraph_loss))
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-05,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss))
         self.verify_predict()
......
@@ -109,8 +109,11 @@ class TestResnet(unittest.TestCase):
     def test_resnet(self):
         static_loss = self.train(to_static=True)
         dygraph_loss = self.train(to_static=False)
-        self.assertTrue(np.allclose(static_loss, dygraph_loss),
-                        msg="static_loss: {} \n dygraph_loss: {}".format(
-                            static_loss, dygraph_loss))
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-05,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss))
......
@@ -118,8 +118,12 @@ class TestResnet(unittest.TestCase):
         static_loss = self.train(to_static=True)
         dygraph_loss = self.train(to_static=False)
         # NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here.
-        self.assertTrue(np.allclose(static_loss, dygraph_loss, atol=1e-3),
-                        msg="static_loss: {} \n dygraph_loss: {}".format(
-                            static_loss, dygraph_loss))
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-05,
+            atol=0.001,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss))
......
@@ -340,7 +340,7 @@ class TestResnet(unittest.TestCase):
     def predict_analysis_inference(self, data):
         output = PredictorTools(self.model_save_dir, self.model_filename,
                                 self.params_filename, [data])
-        out = output()
+        out, = output()
         return out

     def train(self, to_static):
@@ -353,21 +353,32 @@ class TestResnet(unittest.TestCase):
         st_pre = self.predict_static(image)
         dy_jit_pre = self.predict_dygraph_jit(image)
         predictor_pre = self.predict_analysis_inference(image)
-        self.assertTrue(np.allclose(dy_pre, st_pre),
-                        msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_pre, st_pre))
-        self.assertTrue(np.allclose(dy_jit_pre, st_pre),
-                        msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
-                            dy_jit_pre, st_pre))
-        self.assertTrue(np.allclose(predictor_pre, st_pre),
-                        msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
-                            predictor_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
+        np.testing.assert_allclose(
+            dy_jit_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
+                dy_jit_pre, st_pre))
+        np.testing.assert_allclose(
+            predictor_pre,
+            st_pre,
+            rtol=1e-05,
+            err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
+                predictor_pre, st_pre))

     def test_resnet(self):
         static_loss = self.train(to_static=True)
         dygraph_loss = self.train(to_static=False)
-        self.assertTrue(np.allclose(static_loss, dygraph_loss),
-                        msg="static_loss: {} \n dygraph_loss: {}".format(
-                            static_loss, dygraph_loss))
+        np.testing.assert_allclose(
+            static_loss,
+            dygraph_loss,
+            rtol=1e-05,
+            err_msg='static_loss: {} \n dygraph_loss: {}'.format(
+                static_loss, dygraph_loss))
         self.verify_predict()
......
@@ -534,7 +534,7 @@ class TestSinh(TestActivation):
             x = fluid.dygraph.to_variable(np_x)
             z = fluid.layers.sinh(x).numpy()
             z_expected = np.sinh(np_x)
-            self.assertTrue(np.allclose(z, z_expected))
+            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

     def test_api(self):
         test_data_shape = [11, 17]
@@ -549,12 +549,12 @@ class TestSinh(TestActivation):
             pd_sinh_out = fluid.layers.sinh(data_x)
             exe = fluid.Executor(place=fluid.CPUPlace())
             exe.run(fluid.default_startup_program())
-            np_sinh_res = exe.run(fluid.default_main_program(),
+            np_sinh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_sinh_out])

         expected_res = np.sinh(input_x)
-        self.assertTrue(np.allclose(np_sinh_res, expected_res))
+        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

     def test_backward(self):
         test_data_shape = [11, 17]
@@ -607,7 +607,7 @@ class TestCosh(TestActivation):
             x = fluid.dygraph.to_variable(np_x)
             z = fluid.layers.cosh(x).numpy()
             z_expected = np.cosh(np_x)
-            self.assertTrue(np.allclose(z, z_expected))
+            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

     def test_api(self):
         test_data_shape = [11, 17]
@@ -622,12 +622,12 @@ class TestCosh(TestActivation):
             pd_cosh_out = paddle.cosh(data_x)
             exe = fluid.Executor(place=fluid.CPUPlace())
             exe.run(fluid.default_startup_program())
-            np_cosh_res = exe.run(fluid.default_main_program(),
+            np_cosh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_cosh_out])

         expected_res = np.cosh(input_x)
-        self.assertTrue(np.allclose(np_cosh_res, expected_res))
+        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

     def test_backward(self):
         test_data_shape = [11, 17]
@@ -2261,11 +2261,11 @@ class TestLog2(TestActivation):
             out1 = paddle.log2(data_x)
             exe = paddle.static.Executor(place=fluid.CPUPlace())
             exe.run(paddle.static.default_startup_program())
-            res1 = exe.run(paddle.static.default_main_program(),
+            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])

         expected_res = np.log2(input_x)
-        self.assertTrue(np.allclose(res1, expected_res))
+        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

         # dygraph
         with fluid.dygraph.guard():
@@ -2274,7 +2274,7 @@ class TestLog2(TestActivation):
             z = paddle.log2(data_x)
             np_z = z.numpy()
             z_expected = np.array(np.log2(np_x))
-            self.assertTrue(np.allclose(np_z, z_expected))
+            np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


 class TestLog10(TestActivation):
@@ -2314,11 +2314,11 @@ class TestLog10(TestActivation):
             out1 = paddle.log10(data_x)
             exe = paddle.static.Executor(place=paddle.CPUPlace())
             exe.run(paddle.static.default_startup_program())
-            res1 = exe.run(paddle.static.default_main_program(),
+            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])

         expected_res = np.log10(input_x)
-        self.assertTrue(np.allclose(res1, expected_res))
+        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

         # dygraph
         with fluid.dygraph.guard():
@@ -2327,7 +2327,7 @@ class TestLog10(TestActivation):
             z = paddle.log10(data_x)
             np_z = z.numpy()
             z_expected = np.array(np.log10(np_x))
-            self.assertTrue(np.allclose(np_z, z_expected))
+            np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


 class TestLog1p(TestActivation):
@@ -2361,11 +2361,11 @@ class TestLog1p(TestActivation):
             out1 = paddle.log1p(data_x)
             exe = fluid.Executor(place=fluid.CPUPlace())
             exe.run(fluid.default_startup_program())
-            res1 = exe.run(fluid.default_main_program(),
+            res1, = exe.run(fluid.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])

         expected_res = np.log1p(input_x)
-        self.assertTrue(np.allclose(res1, expected_res))
+        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

         # dygraph
         with fluid.dygraph.guard():
@@ -2374,7 +2374,7 @@ class TestLog1p(TestActivation):
             z = paddle.log1p(data_x)
             np_z = z.numpy()
             z_expected = np.array(np.log1p(np_x))
-            self.assertTrue(np.allclose(np_z, z_expected))
+            np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


 class TestSquare(TestActivation):
......
@@ -130,14 +130,14 @@ class TestAucAPI(unittest.TestCase):
         y = np.array([0, 0, 1, 0]).astype('int64')
         z = np.array([1, 1, 1, 1]).astype('float32')

-        output = exe.run(feed={
+        output, = exe.run(feed={
             "input": x,
             "label": y,
             "ins_tag_weight": z
         },
                          fetch_list=[result[0]])
         auc_np = np.array([0.66666667]).astype("float32")
-        self.assertTrue(np.allclose(output, auc_np))
+        np.testing.assert_allclose(output, auc_np, rtol=1e-05)


 class TestAucOpError(unittest.TestCase):
......
@@ -43,7 +43,7 @@ def test_static_layer(place,
     bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
     res = bce_loss(input, label)
     exe = paddle.static.Executor(place)
-    static_result = exe.run(prog,
+    static_result, = exe.run(prog,
                             feed={
                                 "input": input_np,
                                 "label": label_np
@@ -83,7 +83,7 @@ def test_static_functional(place,
                                    label,
                                    reduction=reduction)
     exe = paddle.static.Executor(place)
-    static_result = exe.run(prog,
+    static_result, = exe.run(prog,
                             feed={
                                 "input": input_np,
                                 "label": label_np
@@ -171,16 +171,20 @@ class TestBCELoss(unittest.TestCase):
                 dy_result = test_dygraph_layer(place, input_np, label_np,
                                                reduction)
                 expected = calc_bceloss(input_np, label_np, reduction)
-                self.assertTrue(np.allclose(static_result, expected))
-                self.assertTrue(np.allclose(static_result, dy_result))
-                self.assertTrue(np.allclose(dy_result, expected))
+                np.testing.assert_allclose(static_result, expected, rtol=1e-05)
+                np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
+                np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
                 static_functional = test_static_functional(
                     place, input_np, label_np, reduction)
                 dy_functional = test_dygraph_functional(place, input_np,
                                                         label_np, reduction)
-                self.assertTrue(np.allclose(static_functional, expected))
-                self.assertTrue(np.allclose(static_functional, dy_functional))
-                self.assertTrue(np.allclose(dy_functional, expected))
+                np.testing.assert_allclose(static_functional,
+                                           expected,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(static_functional,
+                                           dy_functional,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

     def test_BCELoss_weight(self):
         input_np = np.random.uniform(0.1, 0.8,
@@ -205,9 +209,9 @@ class TestBCELoss(unittest.TestCase):
                                            label_np,
                                            reduction,
                                            weight_np=weight_np)
-            self.assertTrue(np.allclose(static_result, expected))
-            self.assertTrue(np.allclose(static_result, dy_result))
-            self.assertTrue(np.allclose(dy_result, expected))
+            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
+            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
+            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
             static_functional = test_static_functional(place,
                                                        input_np,
                                                        label_np,
@@ -218,9 +222,11 @@ class TestBCELoss(unittest.TestCase):
                                                     label_np,
                                                     reduction,
                                                     weight_np=weight_np)
-            self.assertTrue(np.allclose(static_functional, expected))
-            self.assertTrue(np.allclose(static_functional, dy_functional))
-            self.assertTrue(np.allclose(dy_functional, expected))
+            np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
+            np.testing.assert_allclose(static_functional,
+                                       dy_functional,
+                                       rtol=1e-05)
+            np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

     def test_BCELoss_error(self):
         paddle.disable_static()
......
@@ -79,7 +79,7 @@ def test_static(place,
     else:
         res = call_bce_layer(logit, label, weight, reduction, pos_weight)
     exe = paddle.static.Executor(place)
-    static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
+    static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res])
     return static_result
@@ -152,9 +152,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                          reduction=reduction)
             expected = calc_bce_with_logits_loss(logit_np, label_np,
                                                  reduction)
-            self.assertTrue(np.allclose(static_result, expected))
-            self.assertTrue(np.allclose(static_result, dy_result))
-            self.assertTrue(np.allclose(dy_result, expected))
+            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
+            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
+            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
             static_functional = test_static(place,
                                             logit_np,
                                             label_np,
@@ -173,10 +173,16 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                        reduction=reduction,
                                        functional=True)
-            self.assertTrue(np.allclose(static_functional, expected))
-            self.assertTrue(np.allclose(static_functional, dy_functional))
-            self.assertTrue(np.allclose(dy_functional, expected))
-            self.assertTrue(np.allclose(eager_functional, expected))
+            np.testing.assert_allclose(static_functional,
+                                       expected,
+                                       rtol=1e-05)
+            np.testing.assert_allclose(static_functional,
+                                       dy_functional,
+                                       rtol=1e-05)
+            np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
+            np.testing.assert_allclose(eager_functional,
+                                       expected,
+                                       rtol=1e-05)

     def test_BCEWithLogitsLoss_weight(self):
         logit_np = np.random.uniform(0.1, 0.8,
@@ -201,9 +207,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                                  label_np,
                                                  reduction,
                                                  weight_np=weight_np)
-            self.assertTrue(np.allclose(static_result, expected))
-            self.assertTrue(np.allclose(static_result, dy_result))
-            self.assertTrue(np.allclose(dy_result, expected))
+            np.testing.assert_allclose(static_result, expected, rtol=1e-05)
+            np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
+            np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
             static_functional = test_static(place,
                                             logit_np,
                                             label_np,
@@ -216,9 +222,11 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                     weight_np=weight_np,
                                     reduction=reduction,
                                     functional=True)
-            self.assertTrue(np.allclose(static_functional, expected))
-            self.assertTrue(np.allclose(static_functional, dy_functional))
-            self.assertTrue(np.allclose(dy_functional, expected))
+            np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
+            np.testing.assert_allclose(static_functional,
+                                       dy_functional,
+                                       rtol=1e-05)
+            np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

     def test_BCEWithLogitsLoss_pos_weight(self):
         logit_np = np.random.uniform(0.1, 0.8,
@@ -236,9 +244,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                        reduction, pos_weight_np)
         expected = calc_bce_with_logits_loss(logit_np, label_np, reduction,
                                              weight_np, pos_weight_np)
-        self.assertTrue(np.allclose(static_result, expected))
-        self.assertTrue(np.allclose(static_result, dy_result))
-        self.assertTrue(np.allclose(dy_result, expected))
+        np.testing.assert_allclose(static_result, expected, rtol=1e-05)
+        np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
+        np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
         static_functional = test_static(place,
                                         logit_np,
                                         label_np,
@@ -253,9 +261,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                                         reduction,
                                         pos_weight_np,
                                         functional=True)
-        self.assertTrue(np.allclose(static_functional, expected))
-        self.assertTrue(np.allclose(static_functional, dy_functional))
-        self.assertTrue(np.allclose(dy_functional, expected))
+        np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
+        np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05)
+        np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

     def test_BCEWithLogitsLoss_error(self):
         paddle.disable_static()
......
@@ -62,9 +62,10 @@ class TestCondInputOutput(unittest.TestCase):
         place = fluid.CUDAPlace(
             0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
         exe = fluid.Executor(place)
-        ret = exe.run(main_program, fetch_list=[out.name])
-        self.assertTrue(
-            np.allclose(np.asarray(ret), np.full((3, 2), -1, np.int32)))
+        ret, = exe.run(main_program, fetch_list=[out.name])
+        np.testing.assert_allclose(np.asarray(ret),
+                                   np.full((3, 2), -1, np.int32),
+                                   rtol=1e-05)

     def test_return_var_tuple(self):
         """
@@ -103,10 +104,12 @@ class TestCondInputOutput(unittest.TestCase):
             0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
         exe = fluid.Executor(place)
         ret = exe.run(main_program, fetch_list=out)
-        self.assertTrue(
-            np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))
-        self.assertTrue(
-            np.allclose(np.asarray(ret[1]), np.full((2, 3), True, bool)))
+        np.testing.assert_allclose(np.asarray(ret[0]),
+                                   np.full((1, 2), 1, np.int32),
+                                   rtol=1e-05)
+        np.testing.assert_allclose(np.asarray(ret[1]),
+                                   np.full((2, 3), True, bool),
+                                   rtol=1e-05)

     def test_pass_and_modify_var(self):
         """
@@ -142,12 +145,12 @@ class TestCondInputOutput(unittest.TestCase):
         exe = fluid.Executor(place)
         for feed_i in range(5):
             expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i
-            ret = exe.run(main_program,
+            ret, = exe.run(main_program,
                           feed={'i': np.full((1), feed_i, np.int32)},
                           fetch_list=[a])
-            self.assertTrue(
-                np.allclose(np.asarray(ret),
-                            np.full((3, 2, 1), expected_a, np.int32)))
+            np.testing.assert_allclose(np.asarray(ret),
+                                       np.full((3, 2, 1), expected_a, np.int32),
+                                       rtol=1e-05)

     def test_return_none(self):
         """
......
@@ -49,7 +49,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
         ndims = len(shape)
         shape_numel = reduce(lambda x, y: x * y, shape)
         if dim == -1:
-            return np.linalg.norm(w, axis=None, keepdims=True)
+            return np.linalg.norm(w, axis=None, keepdims=True).flatten()
         elif dim == 0:
             tile_shape = list(w.shape)
             tile_shape[0] = 1
@@ -132,7 +132,10 @@ class TestDygraphWeightNorm(unittest.TestCase):
         expect_output = self.weight_normalize(before_weight, self.dim)
         for expect, actual in zip(expect_output, self.actual_outputs):
-            self.assertTrue(np.allclose(np.array(actual), expect, atol=0.001))
+            np.testing.assert_allclose(np.array(actual),
+                                       expect,
+                                       rtol=1e-05,
+                                       atol=0.001)


 class TestDygraphWeightNormCase1(TestDygraphWeightNorm):
......
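Why the `.flatten()` above is needed: with `axis=None` and `keepdims=True`, `np.linalg.norm` keeps one size-1 axis per input dimension, so the reference no longer matched the 1-D norm tensor once `assert_allclose` began checking shapes. A quick standalone illustration (the `(3, 4, 5)` shape is arbitrary):

import numpy as np

w = np.random.rand(3, 4, 5)

norm = np.linalg.norm(w, axis=None, keepdims=True)
print(norm.shape)            # (1, 1, 1): one kept axis per input axis

print(norm.flatten().shape)  # (1,): comparable to a 1-D weight-norm scale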
@@ -336,9 +336,12 @@ class TestNumpyTests(unittest.TestCase):
     def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8):
         error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}'
-        self.assertTrue(
-            np.allclose(actual, expect, rtol=rtol, atol=atol),
-            error_msg.format(paddle.get_device(), expect, actual,
-                             self.__class__.__name__))
+        np.testing.assert_allclose(actual,
+                                   expect,
+                                   rtol=rtol,
+                                   atol=atol,
+                                   err_msg=error_msg.format(
+                                       paddle.get_device(), expect, actual,
+                                       self.__class__.__name__))

     def check_output(self, eqn, *ops):
@@ -425,7 +428,7 @@ class TestNumpyTests(unittest.TestCase):
         p = np.ones((1, 5)) / 2
         q = np.ones((5, 5)) / 2
-        self.check_output("...ij,...jk->...ik", p, p)
+        self.check_output("...ij,...jk->...ik", p, p.T)
         self.check_output("...ij,...jk->...ik", p, q)

         x = np.eye(2).astype('float')
......
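On the einsum fix above: with `p` of shape `(1, 5)`, the second operand of `...ij,...jk->...ik` needs its `j` axis to have length 5, which `p.T` (shape `(5, 1)`) satisfies but `p` itself does not. A standalone NumPy check of the corrected contraction:

import numpy as np

p = np.ones((1, 5)) / 2

out = np.einsum("...ij,...jk->...ik", p, p.T)  # (1, 5) @ (5, 1) -> (1, 1)
np.testing.assert_allclose(out, p @ p.T, rtol=1e-05)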
@@ -351,9 +351,12 @@ class TestNumpyTests(unittest.TestCase):
     def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8):
         error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}'
-        self.assertTrue(
-            np.allclose(actual, expect, rtol=rtol, atol=atol),
-            error_msg.format(paddle.get_device(), expect, actual,
-                             self.__class__.__name__))
+        np.testing.assert_allclose(actual,
+                                   expect,
+                                   rtol=rtol,
+                                   atol=atol,
+                                   err_msg=error_msg.format(
+                                       paddle.get_device(), expect, actual,
+                                       self.__class__.__name__))

     def check_output(self, eqn, *ops):
......
@@ -104,7 +104,7 @@ class TestHeavisideAPI_float64(unittest.TestCase):
             out = paddle.heaviside(x, y)

             exe = paddle.static.Executor(place=place)
-            res = exe.run(prog,
+            res, = exe.run(prog,
                           feed={
                               f"x_{self.dtype}": self.x_np,
                               f"y_{self.dtype}": self.y_np
@@ -112,7 +112,7 @@ class TestHeavisideAPI_float64(unittest.TestCase):
                           },
                           fetch_list=out,
                           use_prune=True)

-            self.assertTrue(np.allclose(res, self.out_np))
+            np.testing.assert_allclose(res, self.out_np, rtol=1e-05)

     def test_dygraph(self):
         for use_cuda in ([False, True]
@@ -122,7 +122,7 @@ class TestHeavisideAPI_float64(unittest.TestCase):
                 result = paddle.heaviside(paddle.to_tensor(self.x_np),
                                           paddle.to_tensor(self.y_np))
-                self.assertTrue(np.allclose(result.numpy(), self.out_np))
+                np.testing.assert_allclose(result.numpy(), self.out_np, rtol=1e-05)


 class TestHeavisideAPI_float32(TestHeavisideAPI_float64):
......
@@ -48,16 +48,16 @@ class TestFracAPI(unittest.TestCase):
         if fluid.core.is_compiled_with_cuda():
             place = fluid.CUDAPlace(0)
         exe = fluid.Executor(place)
-        res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        res, = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_frac(self.x_np)
-        self.assertTrue(np.allclose(out_ref, res))
+        np.testing.assert_allclose(out_ref, res, rtol=1e-05)

     def test_api_dygraph(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out = paddle.frac(x)
         out_ref = ref_frac(self.x_np)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

     def test_api_eager(self):
         paddle.disable_static(self.place)
@@ -65,7 +65,7 @@ class TestFracAPI(unittest.TestCase):
             x_tensor = paddle.to_tensor(self.x_np)
             out = paddle.frac(x_tensor)
             out_ref = ref_frac(self.x_np)
-            self.assertTrue(np.allclose(out_ref, out.numpy()))
+            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()

     def test_api_eager_dygraph(self):
......
@@ -248,7 +248,7 @@ class TestGatherNdAPI2(unittest.TestCase):
                           },
                           fetch_list=[out])
         expected_output = np.array([[3, 4]])
-        self.assertTrue(np.allclose(result, expected_output))
+        np.testing.assert_allclose(result, expected_output, rtol=1e-05)

     def test_imperative(self):
         paddle.disable_static()
@@ -258,8 +258,8 @@ class TestGatherNdAPI2(unittest.TestCase):
         index = fluid.dygraph.to_variable(index_1)
         output = paddle.fluid.layers.gather(input, index)
         output_np = output.numpy()
-        expected_output = np.array([3, 4])
-        self.assertTrue(np.allclose(output_np, expected_output))
+        expected_output = np.array([[3, 4]])
+        np.testing.assert_allclose(output_np, expected_output, rtol=1e-05)
         paddle.enable_static()
......
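The dygraph expectation above changes from `[3, 4]` to `[[3, 4]]` because gathering with an index tensor keeps the index axis; the old 1-D shape only passed under np.allclose broadcasting. The NumPy analogue (fancy indexing behaves the same way):

import numpy as np

x = np.array([[1, 2], [3, 4]])
index = np.array([1])

out = x[index]        # the index axis is kept: shape (1, 2), not (2,)
np.testing.assert_allclose(out, np.array([[3, 4]]), rtol=1e-05)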
@@ -746,9 +746,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
         np_div = np.array([[0, 2, 3], [2, 8, 10], [1, 4, 5]], dtype="float32")

         for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
-            self.assertTrue(
-                np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+            np.testing.assert_allclose(
+                np_res,
+                paddle_res,
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_res, paddle_res))

     def test_compute_all_with_mean(self):
         paddle.disable_static()
@@ -774,9 +778,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
         np_div = np.array([[0, 2, 3], [1, 4, 5], [1, 4, 5]], dtype="float32")

         for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
-            self.assertTrue(
-                np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+            np.testing.assert_allclose(
+                np_res,
+                paddle_res,
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_res, paddle_res))

     def test_compute_all_with_max(self):
         paddle.disable_static()
@@ -801,11 +809,15 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
         np_mul = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32")
         np_div = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32")

-        self.assertTrue(np.allclose(np_sub, res_sub, atol=1e-6))
+        np.testing.assert_allclose(np_sub, res_sub, rtol=1e-05, atol=1e-06)
         for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
-            self.assertTrue(
-                np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+            np.testing.assert_allclose(
+                np_res,
+                paddle_res,
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_res, paddle_res))

     def test_compute_all_with_max_fp16(self):
         paddle.disable_static()
@@ -840,13 +852,20 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
             np_div = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]],
                               dtype="float16")

-            self.assertTrue(np.allclose(np_sub, res_sub, atol=1e-6))
+            np.testing.assert_allclose(np_sub,
+                                       res_sub,
+                                       rtol=1e-05,
+                                       atol=1e-06)
             for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
                                           res):
-                self.assertTrue(
-                    np.allclose(np_res, paddle_res, atol=1e-6),
-                    "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+                np.testing.assert_allclose(
+                    np_res,
+                    paddle_res,
+                    rtol=1e-05,
+                    atol=1e-06,
+                    err_msg=
+                    'two value is {}\n{}, check diff!'
+                    .format(np_res, paddle_res))

     def test_compute_all_with_min(self):
         paddle.disable_static()
@@ -872,9 +891,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
         np_div = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float32")

         for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
-            self.assertTrue(
-                np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+            np.testing.assert_allclose(
+                np_res,
+                paddle_res,
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_res, paddle_res))

     def test_compute_all_with_min_fp16(self):
         paddle.disable_static()
@@ -910,10 +933,14 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
             for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
                                           res):
-                self.assertTrue(
-                    np.allclose(np_res, paddle_res, atol=1e-6),
-                    "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+                np.testing.assert_allclose(
+                    np_res,
+                    paddle_res,
+                    rtol=1e-05,
+                    atol=1e-06,
+                    err_msg=
+                    'two value is {}\n{}, check diff!'
+                    .format(np_res, paddle_res))

     def test_reshape_lhs_rhs(self):
         paddle.disable_static()
@@ -927,9 +954,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
                                          "add", "min")
         np_add = np.array([[1, 3, 4], [1, 3, 4], [2, 5, 6]],
                           dtype="float16").reshape([3, 3, 1])
-        self.assertTrue(
-            np.allclose(np_add, res_add, atol=1e-6), "two value is\
-            {}\n{}, check diff!".format(np_add, res_add))
+        np.testing.assert_allclose(
+            np_add,
+            res_add,
+            rtol=1e-05,
+            atol=1e-06,
+            err_msg='two value is {}\n{}, check diff!'.
+            format(np_add, res_add))

     def test_out_size_tensor_static(self):
         paddle.enable_static()
@@ -962,9 +993,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
                               'out_size': data5,
                           },
                           fetch_list=[res_sum])
-            self.assertTrue(
-                np.allclose(np_sum, ret[0], atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_sum, ret[0]))
+            np.testing.assert_allclose(
+                np_sum,
+                ret[0],
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_sum, ret[0]))

     def test_api_eager_dygraph(self):
         with _test_eager_guard():
......
@@ -198,9 +198,13 @@ class API_GeometricSendUVTest(unittest.TestCase):
                           dtype="float32")

         for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
-            self.assertTrue(
-                np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                {}\n{}, check diff!".format(np_res, paddle_res))
+            np.testing.assert_allclose(
+                np_res,
+                paddle_res,
+                rtol=1e-05,
+                atol=1e-06,
+                err_msg='two value is {}\n{}, check diff!'.
+                format(np_res, paddle_res))

     def test_compute_all_static(self):
         paddle.enable_static()
@@ -256,9 +260,13 @@ class API_GeometricSendUVTest(unittest.TestCase):
                            fetch_list=[res_add, res_sub, res_mul, res_div])
             for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
                                           ret):
-                self.assertTrue(
-                    np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
-                    {}\n{}, check diff!".format(np_res, paddle_res))
+                np.testing.assert_allclose(
+                    np_res,
+                    paddle_res,
+                    rtol=1e-05,
+                    atol=1e-06,
+                    err_msg='two value is {}\n{}, check diff!'
+                    .format(np_res, paddle_res))

     def test_api_eager_dygraph(self):
         with _test_eager_guard():
......
@@ -61,7 +61,7 @@ class TestEagerGrad(TestCase):
         # stop_gradient = !create_graph, create_graph default false
         self.assertEqual(dx[0].stop_gradient, True)
-        self.assertTrue(np.allclose(dx[0].numpy(), expected_dx[0]))
+        np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)

     def test_simple_example_eager_grad(self):
         with _test_eager_guard():
@@ -83,7 +83,7 @@ class TestEagerGrad(TestCase):
         dx = fluid.dygraph.grad(out, [x, z], allow_unused=True)
         dout = np.ones_like(np_y)
         expected_dx = np.matmul(dout, np.transpose(np_y))
-        self.assertTrue(np.allclose(dx[0].numpy(), expected_dx[0]))
+        np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)
         # stop_gradient = !create_graph, create_graph default false
         self.assertEqual(dx[0].stop_gradient, True)
         # x is unused input in the graph
......
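The `expected_dx[0]` above was the "wrong subscript" from the commit message: it compared the full gradient against only its first row, which np.allclose accepted via broadcasting, and which happened to pass because `dout` is all ones so every row of the reference is identical. A hypothetical minimal reproduction of that masking effect in plain NumPy:

import numpy as np

np_y = np.random.rand(3, 4)
dout = np.ones((2, 4))  # gradient of ones, as in the test

expected_dx = np.matmul(dout, np.transpose(np_y))  # shape (2, 3)

# Every row is identical because dout is all ones, so comparing the full
# (2, 3) gradient against its (3,) first row broadcast and passed:
assert np.allclose(expected_dx, expected_dx[0])

# assert_allclose rejects the (2, 3) vs (3,) shape mismatch outright,
# which is what exposed the wrong subscript.
try:
    np.testing.assert_allclose(expected_dx, expected_dx[0], rtol=1e-05)
except AssertionError:
    print("shape mismatch caught")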
@@ -74,7 +74,7 @@ class TestMaskedSelectAPI(unittest.TestCase):
         mask = paddle.to_tensor(np_mask)
         out = paddle.masked_select(x, mask)
         np_out = np_masked_select(np_x, np_mask)
-        self.assertEqual(np.allclose(out.numpy(), np_out), True)
+        np.testing.assert_allclose(out.numpy(), np_out, rtol=1e-05)
         paddle.enable_static()

     def test_static_mode(self):
@@ -89,13 +89,13 @@ class TestMaskedSelectAPI(unittest.TestCase):
         exe = paddle.static.Executor(place=paddle.CPUPlace())

-        res = exe.run(paddle.static.default_main_program(),
+        res, = exe.run(paddle.static.default_main_program(),
                       feed={
                           "x": np_x,
                           "mask": np_mask
                       },
                       fetch_list=[out])
-        self.assertEqual(np.allclose(res, np_out), True)
+        np.testing.assert_allclose(res, np_out, rtol=1e-05)


 class TestMaskedSelectError(unittest.TestCase):
......
@@ -41,11 +41,11 @@ class TestMathOpPatches(unittest.TestCase):
         b_np, c_np, d_np = exe.run(fluid.default_main_program(),
                                    feed={"a": a_np},
                                    fetch_list=[b, c, d])
-        self.assertTrue(np.allclose(a_np + 10, b_np))
+        np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)
         ab_np = np.concatenate([a_np, b_np], axis=1)
-        self.assertTrue(np.allclose(ab_np + 10, c_np))
+        np.testing.assert_allclose(ab_np + 10, c_np, rtol=1e-05)
         d_expected = ab_np + np.concatenate([a_np, a_np], axis=1)
-        self.assertTrue(np.allclose(d_expected, d_np))
+        np.testing.assert_allclose(d_expected, d_np, rtol=1e-05)

     @prog_scope()
     def test_radd_scalar(self):
@@ -57,7 +57,7 @@ class TestMathOpPatches(unittest.TestCase):
         b_np = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(a_np + 10, b_np))
+        np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)

     @prog_scope()
     def test_sub_scalar(self):
@@ -66,10 +66,10 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(a_np - 10, b_np))
+        np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05)

     @prog_scope()
     def test_rsub_scalar(self):
@@ -78,10 +78,10 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(10 - a_np, b_np))
+        np.testing.assert_allclose(10 - a_np, b_np, rtol=1e-05)

     @prog_scope()
     def test_mul_scalar(self):
@@ -90,10 +90,10 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(a_np * 10, b_np))
+        np.testing.assert_allclose(a_np * 10, b_np, rtol=1e-05)

     @prog_scope()
     def test_rmul_scalar(self):
@@ -102,10 +102,10 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(10 * a_np, b_np))
+        np.testing.assert_allclose(10 * a_np, b_np, rtol=1e-05)

     @prog_scope()
     def test_div_scalar(self):
@@ -114,10 +114,10 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(a_np / 10, b_np))
+        np.testing.assert_allclose(a_np / 10, b_np, rtol=1e-05)

     @prog_scope()
     def test_rdiv_scalar(self):
@@ -127,10 +127,10 @@ class TestMathOpPatches(unittest.TestCase):
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2

-        b_np = exe.run(fluid.default_main_program(),
+        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        self.assertTrue(np.allclose(10 / a_np, b_np))
+        np.testing.assert_allclose(10 / a_np, b_np, rtol=1e-05)

     @prog_scope()
     def test_div_two_tensor(self):
@@ -141,13 +141,13 @@ class TestMathOpPatches(unittest.TestCase):
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
         b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2
-        c_np = exe.run(fluid.default_main_program(),
+        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
-        self.assertTrue(np.allclose(a_np / b_np, c_np))
+        np.testing.assert_allclose(a_np / b_np, c_np, rtol=1e-05)

     @prog_scope()
     def test_mul_two_tensor(self):
@@ -158,13 +158,13 @@ class TestMathOpPatches(unittest.TestCase):
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
         b_np = np.random.random(size=[10, 1]).astype('float32')
-        c_np = exe.run(fluid.default_main_program(),
+        c_np, = exe.run(fluid.default_main_program(),
                        feed={
                            "a": a_np,
                            'b': b_np
                        },
                        fetch_list=[c])
-        self.assertTrue(np.allclose(a_np * b_np, c_np))
+        np.testing.assert_allclose(a_np * b_np, c_np, rtol=1e-05)
@prog_scope() @prog_scope()
def test_add_two_tensor(self): def test_add_two_tensor(self):
...@@ -175,13 +175,13 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -175,13 +175,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place) exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32') a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32')
c_np = exe.run(fluid.default_main_program(), c_np, = exe.run(fluid.default_main_program(),
feed={ feed={
"a": a_np, "a": a_np,
'b': b_np 'b': b_np
}, },
fetch_list=[c]) fetch_list=[c])
self.assertTrue(np.allclose(a_np + b_np, c_np)) np.testing.assert_allclose(a_np + b_np, c_np, rtol=1e-05)
@prog_scope() @prog_scope()
def test_sub_two_tensor(self): def test_sub_two_tensor(self):
...@@ -192,13 +192,13 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -192,13 +192,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place) exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32') a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32')
c_np = exe.run(fluid.default_main_program(), c_np, = exe.run(fluid.default_main_program(),
feed={ feed={
"a": a_np, "a": a_np,
'b': b_np 'b': b_np
}, },
fetch_list=[c]) fetch_list=[c])
self.assertTrue(np.allclose(a_np - b_np, c_np)) np.testing.assert_allclose(a_np - b_np, c_np, rtol=1e-05)
@prog_scope() @prog_scope()
def test_integer_div(self): def test_integer_div(self):
...@@ -212,7 +212,7 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -212,7 +212,7 @@ class TestMathOpPatches(unittest.TestCase):
fetch_list=[b]) fetch_list=[b])
b_np_actual = (a_np / 7).astype('float32') b_np_actual = (a_np / 7).astype('float32')
self.assertTrue(np.allclose(b_np, b_np_actual)) np.testing.assert_allclose(b_np, b_np_actual, rtol=1e-05)
@prog_scope() @prog_scope()
def test_equal(self): def test_equal(self):
...@@ -266,10 +266,10 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -266,10 +266,10 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place) exe = fluid.Executor(place)
a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32') a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(), b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np}, feed={"a": a_np},
fetch_list=[b]) fetch_list=[b])
self.assertTrue(np.allclose(-a_np, b_np)) np.testing.assert_allclose(-a_np, b_np, rtol=1e-05)
@prog_scope() @prog_scope()
def test_astype(self): def test_astype(self):
...@@ -279,10 +279,10 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -279,10 +279,10 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place) exe = fluid.Executor(place)
a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64') a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64')
b_np = exe.run(fluid.default_main_program(), b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np}, feed={"a": a_np},
fetch_list=[b]) fetch_list=[b])
self.assertTrue(np.allclose(a_np.astype('float32'), b_np)) np.testing.assert_allclose(a_np.astype('float32'), b_np, rtol=1e-05)
def test_bitwise_and(self): def test_bitwise_and(self):
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32") x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
...@@ -384,13 +384,13 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -384,13 +384,13 @@ class TestMathOpPatches(unittest.TestCase):
b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32') b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32')
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
c_np = exe.run(paddle.static.default_main_program(), c_np, = exe.run(paddle.static.default_main_program(),
feed={ feed={
"a": a_np, "a": a_np,
"b": b_np "b": b_np
}, },
fetch_list=[c]) fetch_list=[c])
self.assertTrue(np.allclose(a_np @ b_np, c_np)) np.testing.assert_allclose(a_np @ b_np, c_np, rtol=1e-05)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -40,7 +40,7 @@ class TestMseLoss(unittest.TestCase): ...@@ -40,7 +40,7 @@ class TestMseLoss(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]): if core.is_compiled_with_cuda() else [False]):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place) exe = Executor(place)
result = exe.run(fluid.default_main_program(), result, = exe.run(fluid.default_main_program(),
feed={ feed={
"input": input_val, "input": input_val,
"label": label_val "label": label_val
...@@ -91,7 +91,7 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -91,7 +91,7 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label) ret = mse_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -106,9 +106,9 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -106,9 +106,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np sub = input_np - label_np
expected = np.mean(sub * sub) expected = np.mean(sub * sub)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
def test_NNMseLoss_sum(self): def test_NNMseLoss_sum(self):
...@@ -131,7 +131,7 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -131,7 +131,7 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label) ret = mse_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -146,9 +146,9 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -146,9 +146,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np sub = input_np - label_np
expected = np.sum(sub * sub) expected = np.sum(sub * sub)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
def test_NNMseLoss_none(self): def test_NNMseLoss_none(self):
...@@ -171,7 +171,7 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -171,7 +171,7 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label) ret = mse_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -186,9 +186,9 @@ class TestNNMseLoss(unittest.TestCase): ...@@ -186,9 +186,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np sub = input_np - label_np
expected = (sub * sub) expected = (sub * sub)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
...@@ -214,7 +214,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -214,7 +214,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"target": target_np "target": target_np
...@@ -229,9 +229,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -229,9 +229,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np sub = input_np - target_np
expected = np.mean(sub * sub) expected = np.mean(sub * sub)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
def test_NNFunctionalMseLoss_sum(self): def test_NNFunctionalMseLoss_sum(self):
...@@ -254,7 +254,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -254,7 +254,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"target": target_np "target": target_np
...@@ -269,9 +269,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -269,9 +269,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np sub = input_np - target_np
expected = np.sum(sub * sub) expected = np.sum(sub * sub)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
def test_NNFunctionalMseLoss_none(self): def test_NNFunctionalMseLoss_none(self):
...@@ -294,7 +294,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -294,7 +294,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_prog) exe.run(startup_prog)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"target": target_np "target": target_np
...@@ -309,9 +309,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase): ...@@ -309,9 +309,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np sub = input_np - target_np
expected = sub * sub expected = sub * sub
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1]) self.assertTrue(dy_result.shape, [1])
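Note: the `self.assertTrue(dy_result.shape, [1])` lines left untouched above are vacuous, since assertTrue treats its second argument as the failure message and any non-empty shape tuple is truthy. A small sketch of the distinction; the intended check was presumably assertEqual:

```python
import unittest


class ShapeCheckDemo(unittest.TestCase):

    def test_shape(self):
        shape = (1,)
        self.assertTrue(shape, [1])         # always passes: (1,) is truthy
                                            # and [1] is only the message
        self.assertEqual(list(shape), [1])  # the check presumably intended


if __name__ == '__main__':
    unittest.main()
```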
......
...@@ -256,16 +256,20 @@ class APITestMultiDot(unittest.TestCase): ...@@ -256,16 +256,20 @@ class APITestMultiDot(unittest.TestCase):
exe = paddle.static.Executor(paddle.CPUPlace()) exe = paddle.static.Executor(paddle.CPUPlace())
data1 = np.random.rand(3, 2).astype("float64") data1 = np.random.rand(3, 2).astype("float64")
data2 = np.random.rand(2, 3).astype("float64") data2 = np.random.rand(2, 3).astype("float64")
np_res = exe.run(feed={ np_res, = exe.run(feed={
'x0': data1, 'x0': data1,
'x1': data2 'x1': data2
}, },
fetch_list=[result]) fetch_list=[result])
expected_result = np.linalg.multi_dot([data1, data2]) expected_result = np.linalg.multi_dot([data1, data2])
self.assertTrue(
    np.allclose(np_res, expected_result, atol=1e-5), "two value is\
    {}\n{}, check diff!".format(np_res, expected_result))
np.testing.assert_allclose(
    np_res,
    expected_result,
    rtol=1e-05,
    atol=1e-05,
    err_msg='two value is {}\n{}, check diff!'.format(
        np_res, expected_result))
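For reference, assert_allclose combines the two tolerances elementwise as |actual - desired| <= atol + rtol * |desired|, and np.allclose keeps its default rtol=1e-05 even when only atol is passed, so the new rtol/atol pair reproduces the old check exactly. A small worked sketch:

```python
import numpy as np

desired = np.array([1.0, 1e-07])
actual = desired + np.array([1.5e-05, 5e-06])

# per-element tolerance: 1e-05 + 1e-05 * |desired|
np.testing.assert_allclose(actual, desired, rtol=1e-05, atol=1e-05)

try:
    # atol defaults to 0 here, so the differences exceed rtol * |desired| alone
    np.testing.assert_allclose(actual, desired, rtol=1e-05)
except AssertionError:
    print('fails without atol: 1.5e-05 > 1e-05 * 1.0')
```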
def test_dygraph_without_out(self): def test_dygraph_without_out(self):
paddle.disable_static() paddle.disable_static()
...@@ -276,7 +280,7 @@ class APITestMultiDot(unittest.TestCase): ...@@ -276,7 +280,7 @@ class APITestMultiDot(unittest.TestCase):
data2 = paddle.to_tensor(input_array2) data2 = paddle.to_tensor(input_array2)
out = paddle.linalg.multi_dot([data1, data2]) out = paddle.linalg.multi_dot([data1, data2])
expected_result = np.linalg.multi_dot([input_array1, input_array2]) expected_result = np.linalg.multi_dot([input_array1, input_array2])
self.assertTrue(np.allclose(expected_result, out.numpy())) np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05)
def test_dygraph_final_state_api(self): def test_dygraph_final_state_api(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -86,7 +86,7 @@ def test_static(place, ...@@ -86,7 +86,7 @@ def test_static(place,
reduction=reduction) reduction=reduction)
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res])
return static_result return static_result
...@@ -164,9 +164,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase): ...@@ -164,9 +164,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
input_np=input, input_np=input,
label_np=label, label_np=label,
reduction=reduction) reduction=reduction)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place=place, static_functional = test_static(place=place,
input_np=input, input_np=input,
label_np=label, label_np=label,
...@@ -177,9 +177,13 @@ class TestMultiLabelMarginLoss(unittest.TestCase): ...@@ -177,9 +177,13 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
label_np=label, label_np=label,
reduction=reduction, reduction=reduction,
functional=True) functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional,
    expected,
    rtol=1e-05)
np.testing.assert_allclose(static_functional,
    dy_functional,
    rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_MultiLabelSoftMarginLoss_error(self): def test_MultiLabelSoftMarginLoss_error(self):
paddle.disable_static() paddle.disable_static()
...@@ -217,9 +221,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase): ...@@ -217,9 +221,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
label_np=label, label_np=label,
weight_np=weight, weight_np=weight,
reduction=reduction) reduction=reduction)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place=place, static_functional = test_static(place=place,
input_np=input, input_np=input,
label_np=label, label_np=label,
...@@ -232,9 +236,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase): ...@@ -232,9 +236,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
weight=weight, weight=weight,
reduction=reduction, reduction=reduction,
functional=True) functional=True)
self.assertTrue(np.allclose(static_functional, expected)) np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_functional, dy_functional)) np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05)
self.assertTrue(np.allclose(dy_functional, expected)) np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_MultiLabelSoftMarginLoss_dimension(self): def test_MultiLabelSoftMarginLoss_dimension(self):
paddle.disable_static() paddle.disable_static()
......
...@@ -96,7 +96,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -96,7 +96,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -117,10 +117,10 @@ class TestNLLLoss(unittest.TestCase): ...@@ -117,10 +117,10 @@ class TestNLLLoss(unittest.TestCase):
eager_result = eager_res.numpy() eager_result = eager_res.numpy()
expected = nll_loss_1d(input_np, label_np)[0] expected = nll_loss_1d(input_np, label_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(eager_result, expected)) np.testing.assert_allclose(eager_result, expected, rtol=1e-05)
def test_NLLLoss_1D_sum(self): def test_NLLLoss_1D_sum(self):
np.random.seed(200) np.random.seed(200)
...@@ -139,7 +139,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -139,7 +139,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -163,10 +163,10 @@ class TestNLLLoss(unittest.TestCase): ...@@ -163,10 +163,10 @@ class TestNLLLoss(unittest.TestCase):
loss.backward() loss.backward()
expected = nll_loss_1d(input_np, label_np, reduction='sum')[0] expected = nll_loss_1d(input_np, label_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(eager_result, expected)) np.testing.assert_allclose(eager_result, expected, rtol=1e-05)
def test_NLLLoss_1D_with_weight_mean(self): def test_NLLLoss_1D_with_weight_mean(self):
np.random.seed(200) np.random.seed(200)
...@@ -187,7 +187,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -187,7 +187,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -213,10 +213,10 @@ class TestNLLLoss(unittest.TestCase): ...@@ -213,10 +213,10 @@ class TestNLLLoss(unittest.TestCase):
expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(eager_result, expected)) np.testing.assert_allclose(eager_result, expected, rtol=1e-05)
def test_NLLLoss_1D_with_weight_sum(self): def test_NLLLoss_1D_with_weight_sum(self):
np.random.seed(200) np.random.seed(200)
...@@ -237,7 +237,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -237,7 +237,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -256,9 +256,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -256,9 +256,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='sum')[0] reduction='sum')[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_1D_with_weight_mean_cpu(self): def test_NLLLoss_1D_with_weight_mean_cpu(self):
np.random.seed(200) np.random.seed(200)
...@@ -277,7 +277,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -277,7 +277,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -293,9 +293,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -293,9 +293,9 @@ class TestNLLLoss(unittest.TestCase):
dy_result = dy_res.numpy() dy_result = dy_res.numpy()
expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0] expected = nll_loss_1d(input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_1D_with_weight_no_reduce_cpu(self): def test_NLLLoss_1D_with_weight_no_reduce_cpu(self):
np.random.seed(200) np.random.seed(200)
...@@ -314,7 +314,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -314,7 +314,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -333,9 +333,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -333,9 +333,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='none') reduction='none')
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_2D_mean(self): def test_NLLLoss_2D_mean(self):
np.random.seed(200) np.random.seed(200)
...@@ -356,7 +356,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -356,7 +356,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -371,9 +371,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -371,9 +371,9 @@ class TestNLLLoss(unittest.TestCase):
expected = nll_loss_2d(input_np, label_np)[0] expected = nll_loss_2d(input_np, label_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_2D_sum(self): def test_NLLLoss_2D_sum(self):
np.random.seed(200) np.random.seed(200)
...@@ -394,7 +394,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -394,7 +394,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -409,9 +409,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -409,9 +409,9 @@ class TestNLLLoss(unittest.TestCase):
expected = nll_loss_2d(input_np, label_np, reduction='sum')[0] expected = nll_loss_2d(input_np, label_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_2D_with_weight_mean(self): def test_NLLLoss_2D_with_weight_mean(self):
np.random.seed(200) np.random.seed(200)
...@@ -435,7 +435,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -435,7 +435,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -452,9 +452,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -452,9 +452,9 @@ class TestNLLLoss(unittest.TestCase):
expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_2D_with_weight_mean_cpu(self): def test_NLLLoss_2D_with_weight_mean_cpu(self):
np.random.seed(200) np.random.seed(200)
...@@ -476,7 +476,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -476,7 +476,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -493,9 +493,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -493,9 +493,9 @@ class TestNLLLoss(unittest.TestCase):
expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0] expected = nll_loss_2d(input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_2D_with_weight_sum(self): def test_NLLLoss_2D_with_weight_sum(self):
np.random.seed(200) np.random.seed(200)
...@@ -518,7 +518,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -518,7 +518,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -538,9 +538,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -538,9 +538,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='sum')[0] reduction='sum')[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_in_dims_not_2or4_mean(self): def test_NLLLoss_in_dims_not_2or4_mean(self):
np.random.seed(200) np.random.seed(200)
...@@ -561,7 +561,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -561,7 +561,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np "label": label_np
...@@ -581,9 +581,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -581,9 +581,9 @@ class TestNLLLoss(unittest.TestCase):
label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1)) label_np_reshape = np.reshape(label_np, (label_shape[0], 1, -1))
expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0] expected = nll_loss_2d(input_np_reshape, label_np_reshape)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self): def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self):
np.random.seed(200) np.random.seed(200)
...@@ -606,7 +606,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -606,7 +606,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -630,9 +630,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -630,9 +630,9 @@ class TestNLLLoss(unittest.TestCase):
label_np_reshape, label_np_reshape,
weight=weight_np)[0] weight=weight_np)[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self): def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self):
np.random.seed(200) np.random.seed(200)
...@@ -655,7 +655,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -655,7 +655,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -680,9 +680,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -680,9 +680,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='sum')[0] reduction='sum')[0]
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self): def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self):
np.random.seed(200) np.random.seed(200)
...@@ -705,7 +705,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -705,7 +705,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -731,9 +731,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -731,9 +731,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='none') reduction='none')
expected = np.reshape(expected, out_shape) expected = np.reshape(expected, out_shape)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self): def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self):
np.random.seed(200) np.random.seed(200)
...@@ -754,7 +754,7 @@ class TestNLLLoss(unittest.TestCase): ...@@ -754,7 +754,7 @@ class TestNLLLoss(unittest.TestCase):
res = nll_loss(input, label) res = nll_loss(input, label)
exe = fluid.Executor(place) exe = fluid.Executor(place)
static_result = exe.run(prog, static_result, = exe.run(prog,
feed={ feed={
"input": input_np, "input": input_np,
"label": label_np, "label": label_np,
...@@ -780,9 +780,9 @@ class TestNLLLoss(unittest.TestCase): ...@@ -780,9 +780,9 @@ class TestNLLLoss(unittest.TestCase):
weight=weight_np, weight=weight_np,
reduction='none') reduction='none')
expected = np.reshape(expected, out_shape) expected = np.reshape(expected, out_shape)
self.assertTrue(np.allclose(static_result, expected)) np.testing.assert_allclose(static_result, expected, rtol=1e-05)
self.assertTrue(np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
self.assertTrue(np.allclose(dy_result, expected)) np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
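The nll_loss_1d / nll_loss_2d references used throughout this file are defined earlier in it and return the loss as their first element, hence the `[0]` indexing. A self-contained sketch of the 1D math being checked (my reconstruction under the standard weighted NLL definition, not the file's helper):

```python
# loss_i = -weight[label_i] * log_prob[i, label_i];
# 'mean' divides by the sum of the selected weights, not by n.
import numpy as np


def nll_loss_1d_ref(log_prob, label, weight=None, reduction='mean'):
    n, c = log_prob.shape
    w = np.ones(c) if weight is None else weight
    per_sample = -w[label] * log_prob[np.arange(n), label]
    if reduction == 'none':
        return per_sample
    if reduction == 'sum':
        return per_sample.sum()
    return per_sample.sum() / w[label].sum()


log_prob = np.log(np.full((4, 3), 1.0 / 3.0))
label = np.array([0, 1, 2, 1])
print(nll_loss_1d_ref(log_prob, label))  # log(3) ~= 1.0986
```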
class TestNLLLossOp1DWithReduce(OpTest): class TestNLLLossOp1DWithReduce(OpTest):
......
...@@ -134,12 +134,12 @@ class TestNormalAPI(unittest.TestCase): ...@@ -134,12 +134,12 @@ class TestNormalAPI(unittest.TestCase):
ret = ret.flatten().reshape([self.repeat_num, -1]) ret = ret.flatten().reshape([self.repeat_num, -1])
mean = np.mean(ret, axis=0) mean = np.mean(ret, axis=0)
std = np.std(ret, axis=0) std = np.std(ret, axis=0)
mean_ref=self.mean.reshape([1, -1]) \ mean_ref=self.mean.flatten() \
if isinstance(self.mean, np.ndarray) else self.mean if isinstance(self.mean, np.ndarray) else self.mean
std_ref=self.std.reshape([1, -1]) \ std_ref=self.std.flatten() \
if isinstance(self.std, np.ndarray) else self.std if isinstance(self.std, np.ndarray) else self.std
self.assertTrue(np.allclose(mean_ref, mean, 0.2, 0.2)) np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
self.assertTrue(np.allclose(std_ref, std, 0.2, 0.2)) np.testing.assert_allclose(std_ref, std, rtol=0.2, atol=0.2)
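This hunk switches the reference from `reshape([1, -1])` to `flatten()` because the old reference had shape (1, n): np.allclose tolerated that via broadcasting, but np.testing.assert_allclose requires identical shapes. A sketch:

```python
import numpy as np

mean = np.zeros(5)                      # computed statistics, shape (5,)
ref_2d = np.zeros(5).reshape([1, -1])   # old-style reference, shape (1, 5)

print(np.allclose(ref_2d, mean))  # True, via broadcasting

try:
    np.testing.assert_allclose(ref_2d, mean, rtol=0.2, atol=0.2)
except AssertionError:
    print('shape mismatch:', ref_2d.shape, 'vs', mean.shape)

np.testing.assert_allclose(ref_2d.flatten(), mean, rtol=0.2, atol=0.2)
```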
class TestNormalAPI_mean_is_tensor(TestNormalAPI): class TestNormalAPI_mean_is_tensor(TestNormalAPI):
......
...@@ -54,7 +54,7 @@ class TestNormalization(unittest.TestCase): ...@@ -54,7 +54,7 @@ class TestNormalization(unittest.TestCase):
self.set_inputs(place) self.set_inputs(place)
exe = fluid.Executor(place) exe = fluid.Executor(place)
output = exe.run(fluid.default_main_program(), output, = exe.run(fluid.default_main_program(),
feed=self.inputs, feed=self.inputs,
fetch_list=self.fetch_list, fetch_list=self.fetch_list,
return_numpy=True) return_numpy=True)
...@@ -91,7 +91,10 @@ class TestNormalization(unittest.TestCase): ...@@ -91,7 +91,10 @@ class TestNormalization(unittest.TestCase):
expect_output = self.l2_normalize(self.data, axis, epsilon) expect_output = self.l2_normalize(self.data, axis, epsilon)
# check output # check output
self.assertTrue(np.allclose(self.op_output, expect_output, atol=0.001))
np.testing.assert_allclose(self.op_output,
    expect_output,
    rtol=1e-05,
    atol=0.001)
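For context, the l2_normalize reference invoked above is defined earlier in the same test file. A sketch assuming the usual definition; the function name and the epsilon floor here are my assumptions, not copied from the file:

```python
# L2-normalize along an axis: divide by the (epsilon-floored) row norm.
import numpy as np


def l2_normalize_ref(x, axis, epsilon=1e-12):
    norm = np.sqrt(np.maximum(
        np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return x / norm


x = np.random.random((3, 4)).astype('float32')
y = l2_normalize_ref(x, axis=1)
# each row of y now has unit L2 norm (up to float32 rounding)
np.testing.assert_allclose(np.linalg.norm(y, axis=1),
                           np.ones(3),
                           rtol=1e-05,
                           atol=0.001)
```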
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -60,7 +60,7 @@ class TestPassBuilder(unittest.TestCase): ...@@ -60,7 +60,7 @@ class TestPassBuilder(unittest.TestCase):
test_loss, = exe.run(test_cp, test_loss, = exe.run(test_cp,
fetch_list=[loss.name], fetch_list=[loss.name],
feed=feed_dict) feed=feed_dict)
train_loss = exe.run(train_cp, train_loss, = exe.run(train_cp,
fetch_list=[loss.name], fetch_list=[loss.name],
feed=feed_dict) feed=feed_dict)
...@@ -72,10 +72,13 @@ class TestPassBuilder(unittest.TestCase): ...@@ -72,10 +72,13 @@ class TestPassBuilder(unittest.TestCase):
if math.isnan(float(avg_train_loss_val)): if math.isnan(float(avg_train_loss_val)):
sys.exit("got NaN loss, training failed.") sys.exit("got NaN loss, training failed.")
self.assertTrue(
    np.allclose(train_loss, test_loss,
    atol=1e-8), "Train loss: " + str(train_loss) +
    "\n Test loss:" + str(test_loss))
np.testing.assert_allclose(train_loss,
    test_loss,
    rtol=1e-05,
    atol=1e-08,
    err_msg='Train loss: ' + str(train_loss) +
    '\n Test loss:' + str(test_loss))
def test_parallel_testing_with_new_strategy(self): def test_parallel_testing_with_new_strategy(self):
build_strategy = fluid.BuildStrategy() build_strategy = fluid.BuildStrategy()
......
...@@ -32,7 +32,7 @@ def ref_prelu(x, weight): ...@@ -32,7 +32,7 @@ def ref_prelu(x, weight):
neg_indices = x <= 0 neg_indices = x <= 0
assert x.shape == neg_indices.shape assert x.shape == neg_indices.shape
x_t[neg_indices] = (x_t * weight)[neg_indices] x_t[neg_indices] = (x_t * weight)[neg_indices]
return (x_t, ) return x_t
def ref_prelu_nn(x, num_parameters, init): def ref_prelu_nn(x, num_parameters, init):
...@@ -61,7 +61,7 @@ class TestFunctionalPReluAPI(unittest.TestCase): ...@@ -61,7 +61,7 @@ class TestFunctionalPReluAPI(unittest.TestCase):
}, },
fetch_list=[out]) fetch_list=[out])
out_ref = ref_prelu(self.x_np, weight_np) out_ref = ref_prelu(self.x_np, weight_np)
self.assertEqual(np.allclose(out_ref, res[0]), True) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def dygraph_check(self, weight_np): def dygraph_check(self, weight_np):
paddle.disable_static(self.place) paddle.disable_static(self.place)
...@@ -69,7 +69,7 @@ class TestFunctionalPReluAPI(unittest.TestCase): ...@@ -69,7 +69,7 @@ class TestFunctionalPReluAPI(unittest.TestCase):
weight = paddle.to_tensor(weight_np) weight = paddle.to_tensor(weight_np)
out = F.prelu(x, weight) out = F.prelu(x, weight)
out_ref = ref_prelu(self.x_np, weight_np) out_ref = ref_prelu(self.x_np, weight_np)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
def test_static_api(self): def test_static_api(self):
...@@ -125,7 +125,7 @@ class TestNNPReluAPI(unittest.TestCase): ...@@ -125,7 +125,7 @@ class TestNNPReluAPI(unittest.TestCase):
feed={'X': self.x_np}, feed={'X': self.x_np},
fetch_list=[out]) fetch_list=[out])
out_ref = ref_prelu_nn(self.x_np, 1, 0.25) out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, res[0]), True) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_dygraph_api(self): def test_dygraph_api(self):
paddle.disable_static(self.place) paddle.disable_static(self.place)
...@@ -134,32 +134,32 @@ class TestNNPReluAPI(unittest.TestCase): ...@@ -134,32 +134,32 @@ class TestNNPReluAPI(unittest.TestCase):
m = paddle.nn.PReLU() m = paddle.nn.PReLU()
out = m(x) out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.25) out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(num_parameters=self.x_np.shape[1]) m = paddle.nn.PReLU(num_parameters=self.x_np.shape[1])
out = m(x) out = m(x)
out_ref = ref_prelu_nn(self.x_np, self.x_np.shape[1], 0.25) out_ref = ref_prelu_nn(self.x_np, self.x_np.shape[1], 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(init=0.5) m = paddle.nn.PReLU(init=0.5)
out = m(x) out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.5) out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight")) m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight"))
out = m(x) out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.25) out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr( m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.5))) initializer=fluid.initializer.Constant(0.5)))
out = m(x) out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.5) out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static() paddle.enable_static()
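The `return (x_t, )` change at the top of this file's hunk removes a stray one-element tuple: np.allclose coerced and broadcast the tuple-wrapped reference, while assert_allclose rejects it on shape. A compact numpy sketch of the same PReLU semantics (my formulation, not the file's ref_prelu):

```python
# PReLU: keep positive values, scale negatives by the learnable weight.
import numpy as np


def ref_prelu_sketch(x, weight):
    return np.where(x > 0, x, weight * x)  # weight broadcasts over x


x = np.array([[-1.0, 2.0], [3.0, -4.0]], dtype='float32')
w = np.array([0.25], dtype='float32')
print(ref_prelu_sketch(x, w))
# [[-0.25  2.  ]
#  [ 3.   -1.  ]]
```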
......
...@@ -334,13 +334,13 @@ class TestPSROIPoolStaticAPI(unittest.TestCase): ...@@ -334,13 +334,13 @@ class TestPSROIPoolStaticAPI(unittest.TestCase):
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
boxes_lod_data = paddle.fluid.create_lod_tensor( boxes_lod_data = paddle.fluid.create_lod_tensor(
self.boxes, [[1, 2]], place) self.boxes, [[1, 2]], place)
out_res = exe.run(paddle.static.default_main_program(), out_res, = exe.run(paddle.static.default_main_program(),
feed={ feed={
'x': self.x, 'x': self.x,
'boxes': boxes_lod_data 'boxes': boxes_lod_data
}, },
fetch_list=[out.name]) fetch_list=[out.name])
self.assertTrue(np.allclose(out_res, expect_out)) np.testing.assert_allclose(out_res, expect_out, rtol=1e-05)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -63,9 +63,10 @@ class TestQueue(unittest.TestCase): ...@@ -63,9 +63,10 @@ class TestQueue(unittest.TestCase):
0) if core.is_compiled_with_cuda() else fluid.CPUPlace() 0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
exe.run(startup_program) exe.run(startup_program)
ret = exe.run(main_program, fetch_list=[data_out.name]) ret, = exe.run(main_program, fetch_list=[data_out.name])
self.assertTrue(
    np.allclose(np.asarray(ret), np.full((2, 3), value, np.float32)))
np.testing.assert_allclose(np.asarray(ret),
    np.full((2, 3), value, np.float32),
    rtol=1e-05)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -157,16 +157,20 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -157,16 +157,20 @@ class TestReorderLoDTensor(unittest.TestCase):
# check output # check output
expect_output, expect_output_lod = self.reorder() expect_output, expect_output_lod = self.reorder()
for actual_output in self.actual_outputs: for actual_output in self.actual_outputs:
self.assertTrue(
    np.allclose(np.array(actual_output), expect_output, atol=0.001))
np.testing.assert_allclose(np.array(actual_output),
    expect_output,
    rtol=1e-05,
    atol=0.001)
self.assertEqual(expect_output_lod, self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths()) actual_output.recursive_sequence_lengths())
# check gradient # check gradient
expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1] expect_grad_lod = self.data[self.data_desc[0][0]][1]
for actual_grad in self.actual_grads: for actual_grad in self.actual_grads:
self.assertTrue(
    np.allclose(np.array(actual_grad), expect_grad, atol=0.001))
np.testing.assert_allclose(np.array(actual_grad),
    expect_grad,
    rtol=1e-05,
    atol=0.001)
self.assertEqual(expect_grad_lod, self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths()) actual_grad.recursive_sequence_lengths())
...@@ -177,16 +181,20 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -177,16 +181,20 @@ class TestReorderLoDTensor(unittest.TestCase):
# check output # check output
expect_output, expect_output_lod = self.reorder() expect_output, expect_output_lod = self.reorder()
for actual_output in self.actual_outputs: for actual_output in self.actual_outputs:
self.assertTrue(
    np.allclose(np.array(actual_output), expect_output, atol=0.001))
np.testing.assert_allclose(np.array(actual_output),
    expect_output,
    rtol=1e-05,
    atol=0.001)
self.assertEqual(expect_output_lod, self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths()) actual_output.recursive_sequence_lengths())
# check gradient # check gradient
expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1] expect_grad_lod = self.data[self.data_desc[0][0]][1]
for actual_grad in self.actual_grads: for actual_grad in self.actual_grads:
self.assertTrue(
    np.allclose(np.array(actual_grad), expect_grad, atol=0.001))
np.testing.assert_allclose(np.array(actual_grad),
    expect_grad,
    rtol=1e-05,
    atol=0.001)
self.assertEqual(expect_grad_lod, self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths()) actual_grad.recursive_sequence_lengths())
...@@ -196,13 +204,16 @@ class TestReorderLoDTensor(unittest.TestCase): ...@@ -196,13 +204,16 @@ class TestReorderLoDTensor(unittest.TestCase):
self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths( self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths(
input_lod) input_lod)
# preserve the output of LodTensor with implicit lod to compare # preserve the output of LodTensor with implicit lod to compare
expect_output = [ expect_outputs = [
np.array(actual_output) for actual_output in self.actual_outputs np.array(actual_output) for actual_output in self.actual_outputs
] ]
self.run_program() self.run_program()
for actual_output in self.actual_outputs:
    self.assertTrue(
        np.allclose(np.array(actual_output), expect_output, atol=0.001))
for actual_output, expect_output in zip(self.actual_outputs,
    expect_outputs):
    np.testing.assert_allclose(np.array(actual_output),
        expect_output,
        rtol=1e-05,
        atol=0.001)
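The zip rewrite above repairs a pairing bug: the old loop checked every output against the whole expect_output list, which numpy quietly stacks into one array. A minimal sketch:

```python
import numpy as np

actual_outputs = [np.ones(3)]
expect_outputs = [np.ones(3)]

# old pattern: item vs list, "works" only because numpy coerces the list
# to shape (1, 3) and broadcasts it against (3,)
print(np.allclose(actual_outputs[0], expect_outputs))  # True by coercion

# new pattern: explicit pairwise comparison with strict shape checking
for actual, expect in zip(actual_outputs, expect_outputs):
    np.testing.assert_allclose(actual, expect, rtol=1e-05, atol=0.001)
```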
class TestReorderLoDTensorError(unittest.TestCase): class TestReorderLoDTensorError(unittest.TestCase):
......
...@@ -78,13 +78,13 @@ class TestFunctionalRReluAPI(unittest.TestCase): ...@@ -78,13 +78,13 @@ class TestFunctionalRReluAPI(unittest.TestCase):
feed={"input": in_np}, feed={"input": in_np},
fetch_list=[res1]) fetch_list=[res1])
self.assertTrue(np.allclose(fetches[0], res_np1)) np.testing.assert_allclose(fetches[0], res_np1, rtol=1e-05)
res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1) res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1)
fetches = exe.run(fluid.default_main_program(), fetches = exe.run(fluid.default_main_program(),
feed={"input": in_np}, feed={"input": in_np},
fetch_list=[res2]) fetch_list=[res2])
self.assertTrue(np.allclose(fetches[0], res_np2)) np.testing.assert_allclose(fetches[0], res_np2, rtol=1e-05)
def test_static(self): def test_static(self):
for place in self.places: for place in self.places:
...@@ -106,23 +106,23 @@ class TestFunctionalRReluAPI(unittest.TestCase): ...@@ -106,23 +106,23 @@ class TestFunctionalRReluAPI(unittest.TestCase):
out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True) out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True)
exe = paddle.static.Executor(place=place) exe = paddle.static.Executor(place=place)
res_1 = exe.run(fluid.default_main_program(), res_1, = exe.run(fluid.default_main_program(),
feed={"x": self.x_np}, feed={"x": self.x_np},
fetch_list=out_1, fetch_list=out_1,
use_prune=True) use_prune=True)
res_2 = exe.run(fluid.default_main_program(), res_2, = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np}, feed={"x2": self.x_np},
fetch_list=out_2, fetch_list=out_2,
use_prune=True) use_prune=True)
res_3 = exe.run(fluid.default_main_program(), res_3, = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np}, feed={"x2": self.x_np},
fetch_list=out_3, fetch_list=out_3,
use_prune=True) use_prune=True)
out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0) out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0)
out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1) out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1)
self.assertEqual(np.allclose(out_ref_1, res_1), True) np.testing.assert_allclose(out_ref_1, res_1, rtol=1e-05)
self.assertEqual(np.allclose(out_ref_2, res_2), True) np.testing.assert_allclose(out_ref_2, res_2, rtol=1e-05)
self.assertTrue( self.assertTrue(
check_output(self.x_np, res_3[0], self.lower_1, self.upper_1)) check_output(self.x_np, res_3[0], self.lower_1, self.upper_1))
...@@ -164,7 +164,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): ...@@ -164,7 +164,7 @@ class TestFunctionalRReluAPI(unittest.TestCase):
x = paddle.to_tensor(self.x_np) x = paddle.to_tensor(self.x_np)
out = F.rrelu(x, lower, upper, training=False) out = F.rrelu(x, lower, upper, training=False)
out_ref = ref_rrelu(self.x_np, lower, upper) out_ref = ref_rrelu(self.x_np, lower, upper)
self.assertEqual(np.allclose(out_ref, out), True) np.testing.assert_allclose(out_ref, out, rtol=1e-05)
paddle.enable_static() paddle.enable_static()
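ref_rrelu is defined earlier in the file; in inference mode (training=False, as tested above) RReLU uses the fixed slope (lower + upper) / 2 for negative inputs, which is presumably what it implements. A sketch under that assumption:

```python
# Inference-mode RReLU: identity for x >= 0, midpoint slope otherwise.
import numpy as np


def ref_rrelu_sketch(x, lower, upper):
    slope = (lower + upper) / 2.0
    return np.where(x >= 0, x, slope * x)


x = np.array([-2.0, -0.5, 0.0, 1.5], dtype='float32')
print(ref_rrelu_sketch(x, lower=0.1, upper=0.3))  # [-0.4 -0.1  0.   1.5]
```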
def test_dygraph_functional(self): def test_dygraph_functional(self):
......
...@@ -119,13 +119,13 @@ class TestSearchSortedAPI(unittest.TestCase): ...@@ -119,13 +119,13 @@ class TestSearchSortedAPI(unittest.TestCase):
dtype="float64") dtype="float64")
out = paddle.searchsorted(sorted_sequence, values) out = paddle.searchsorted(sorted_sequence, values)
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
res = exe.run(feed={ res, = exe.run(feed={
'SortedSequence': self.sorted_sequence, 'SortedSequence': self.sorted_sequence,
'Values': self.values 'Values': self.values
}, },
fetch_list=out) fetch_list=out)
out_ref = np.searchsorted(self.sorted_sequence, self.values) out_ref = np.searchsorted(self.sorted_sequence, self.values)
self.assertTrue(np.allclose(out_ref, res)) np.testing.assert_allclose(out_ref, res, rtol=1e-05)
for place in self.place: for place in self.place:
run(place) run(place)
...@@ -141,7 +141,7 @@ class TestSearchSortedAPI(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestSearchSortedAPI(unittest.TestCase):
out_ref = np.searchsorted(self.sorted_sequence, out_ref = np.searchsorted(self.sorted_sequence,
self.values, self.values,
side='right') side='right')
self.assertEqual(np.allclose(out_ref, out.numpy()), True) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
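The dygraph case above exercises side='right'. A quick standalone illustration of the two insertion sides in np.searchsorted:

```python
import numpy as np

seq = np.array([1, 3, 5, 7])
vals = np.array([3, 4, 8])

print(np.searchsorted(seq, vals))                # [1 2 4]: insert before equals
print(np.searchsorted(seq, vals, side='right'))  # [2 2 4]: insert after equals
```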
paddle.enable_static() paddle.enable_static()
for place in self.place: for place in self.place:
......
...@@ -69,7 +69,7 @@ class TestSignAPI(unittest.TestCase): ...@@ -69,7 +69,7 @@ class TestSignAPI(unittest.TestCase):
z = paddle.sgn(x) z = paddle.sgn(x)
np_z = z.numpy() np_z = z.numpy()
z_expected = np_sgn(np_x) z_expected = np_sgn(np_x)
self.assertTrue(np.allclose(np_z, z_expected)) np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
def test_float(self): def test_float(self):
for dtype in self.support_dtypes: for dtype in self.support_dtypes:
...@@ -78,7 +78,7 @@ class TestSignAPI(unittest.TestCase): ...@@ -78,7 +78,7 @@ class TestSignAPI(unittest.TestCase):
z = paddle.sgn(x) z = paddle.sgn(x)
np_z = z.numpy() np_z = z.numpy()
z_expected = np_sgn(np_x) z_expected = np_sgn(np_x)
self.assertTrue(np.allclose(np_z, z_expected)) np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
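np_sgn is the file's reference helper; a sketch assuming it implements x / |x| with zero mapped to zero, which reduces to np.sign for real dtypes and a unit-magnitude complex number otherwise:

```python
import numpy as np


def np_sgn_sketch(x):
    mag = np.abs(x)
    out = np.zeros_like(x)      # zeros stay exactly zero
    nz = mag != 0
    out[nz] = x[nz] / mag[nz]   # unit magnitude elsewhere
    return out


print(np_sgn_sketch(np.array([3 + 4j, 0j])))       # [0.6+0.8j 0.+0.j]
print(np_sgn_sketch(np.array([-2.5, 0.0, 7.0])))   # [-1.  0.  1.]
```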
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -142,9 +142,9 @@ class TestSigmoidFocalLoss(unittest.TestCase): ...@@ -142,9 +142,9 @@ class TestSigmoidFocalLoss(unittest.TestCase):
for alpha in alphas: for alpha in alphas:
for gamma in gammas: for gamma in gammas:
for normalizer_np in normalizer_nps: for normalizer_np in normalizer_nps:
static_result = test_static(place, logit_np,
    label_np, normalizer_np,
    alpha, gamma, reduction)
static_result, = test_static(
    place, logit_np, label_np, normalizer_np, alpha,
    gamma, reduction)
dy_result = test_dygraph(place, logit_np, label_np, dy_result = test_dygraph(place, logit_np, label_np,
normalizer_np, alpha, normalizer_np, alpha,
gamma, reduction) gamma, reduction)
...@@ -155,12 +155,18 @@ class TestSigmoidFocalLoss(unittest.TestCase): ...@@ -155,12 +155,18 @@ class TestSigmoidFocalLoss(unittest.TestCase):
expected = calc_sigmoid_focal_loss( expected = calc_sigmoid_focal_loss(
logit_np, label_np, normalizer_np, alpha, gamma, logit_np, label_np, normalizer_np, alpha, gamma,
reduction) reduction)
self.assertTrue(np.allclose(static_result, np.testing.assert_allclose(static_result,
expected)) expected,
self.assertTrue( rtol=1e-05)
np.allclose(static_result, dy_result)) np.testing.assert_allclose(static_result,
self.assertTrue(np.allclose(dy_result, expected)) dy_result,
self.assertTrue(np.allclose(eager_result, expected)) rtol=1e-05)
np.testing.assert_allclose(dy_result,
expected,
rtol=1e-05)
np.testing.assert_allclose(eager_result,
expected,
rtol=1e-05)
def test_SigmoidFocalLoss_error(self): def test_SigmoidFocalLoss_error(self):
paddle.disable_static() paddle.disable_static()
......
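In the focal loss test the unpacking happens at the helper's call site, since `test_static` returns the `exe.run` list unchanged. The trailing-comma form `x, = seq` is ordinary one-element tuple unpacking, so besides extracting the array it doubles as a check that exactly one value was fetched (illustrative values only):

```python
# One-element unpacking also guards the size of the fetch list.
static_result, = [3.14]           # binds 3.14
try:
    static_result, = [3.14, 2.72]  # two fetched values
except ValueError as err:
    print(err)                     # too many values to unpack (expected 1)
```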
@@ -58,7 +58,7 @@ class SmoothL1Loss(unittest.TestCase):
             ret = smooth_l1_loss(input, label)
             exe = fluid.Executor(place)
-            static_ret = exe.run(prog,
+            static_ret, = exe.run(prog,
                                  feed={
                                      'input': input_np,
                                      'label': label_np,
@@ -72,9 +72,9 @@ class SmoothL1Loss(unittest.TestCase):
         dy_ret_value = dy_ret.numpy()
         self.assertIsNotNone(dy_ret_value)
         expected = smooth_l1_loss_np(input_np, label_np, reduction='mean')
-        self.assertTrue(np.allclose(static_ret, dy_ret_value))
-        self.assertTrue(np.allclose(static_ret, expected))
-        self.assertTrue(np.allclose(dy_ret_value, expected))
+        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
+        np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
+        np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

     def test_smooth_l1_loss_sum(self):
         input_np = np.random.random([100, 200]).astype(np.float32)
@@ -90,7 +90,7 @@ class SmoothL1Loss(unittest.TestCase):
             ret = smooth_l1_loss(input, label)
             exe = fluid.Executor(place)
-            static_ret = exe.run(prog,
+            static_ret, = exe.run(prog,
                                  feed={
                                      'input': input_np,
                                      'label': label_np,
@@ -104,9 +104,9 @@ class SmoothL1Loss(unittest.TestCase):
         dy_ret_value = dy_ret.numpy()
         self.assertIsNotNone(dy_ret_value)
         expected = smooth_l1_loss_np(input_np, label_np, reduction='sum')
-        self.assertTrue(np.allclose(static_ret, dy_ret_value))
-        self.assertTrue(np.allclose(static_ret, expected))
-        self.assertTrue(np.allclose(dy_ret_value, expected))
+        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
+        np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
+        np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

     def test_smooth_l1_loss_none(self):
         input_np = np.random.random([100, 200]).astype(np.float32)
@@ -122,7 +122,7 @@ class SmoothL1Loss(unittest.TestCase):
             ret = smooth_l1_loss(input, label)
             exe = fluid.Executor(place)
-            static_ret = exe.run(prog,
+            static_ret, = exe.run(prog,
                                  feed={
                                      'input': input_np,
                                      'label': label_np,
@@ -136,9 +136,9 @@ class SmoothL1Loss(unittest.TestCase):
         dy_ret_value = dy_ret.numpy()
         self.assertIsNotNone(dy_ret_value)
         expected = smooth_l1_loss_np(input_np, label_np, reduction='none')
-        self.assertTrue(np.allclose(static_ret, dy_ret_value))
-        self.assertTrue(np.allclose(static_ret, expected))
-        self.assertTrue(np.allclose(dy_ret_value, expected))
+        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
+        np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
+        np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

     def test_smooth_l1_loss_delta(self):
         input_np = np.random.random([100, 200]).astype(np.float32)
@@ -155,7 +155,7 @@ class SmoothL1Loss(unittest.TestCase):
             ret = smooth_l1_loss(input, label)
             exe = fluid.Executor(place)
-            static_ret = exe.run(prog,
+            static_ret, = exe.run(prog,
                                  feed={
                                      'input': input_np,
                                      'label': label_np,
@@ -169,9 +169,9 @@ class SmoothL1Loss(unittest.TestCase):
         dy_ret_value = dy_ret.numpy()
         self.assertIsNotNone(dy_ret_value)
         expected = smooth_l1_loss_np(input_np, label_np, delta=delta)
-        self.assertTrue(np.allclose(static_ret, dy_ret_value))
-        self.assertTrue(np.allclose(static_ret, expected))
-        self.assertTrue(np.allclose(dy_ret_value, expected))
+        np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
+        np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
+        np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)


 if __name__ == "__main__":
...
@@ -36,7 +36,7 @@ def test_static_layer(
         sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction)
         res = sm_loss(input, label)
     exe = paddle.static.Executor(place)
-    static_result = exe.run(prog,
+    static_result, = exe.run(prog,
                             feed={
                                 "input": input_np,
                                 "label": label_np
@@ -66,7 +66,7 @@ def test_static_functional(
                                label,
                                reduction=reduction)
     exe = paddle.static.Executor(place)
-    static_result = exe.run(prog,
+    static_result, = exe.run(prog,
                             feed={
                                 "input": input_np,
                                 "label": label_np
@@ -146,17 +146,26 @@ class TestSoftMarginLoss(unittest.TestCase):
                     reduction)
                 expected = calc_softmarginloss(input_np, label_np,
                                                reduction)
-                self.assertTrue(np.allclose(static_result, expected))
-                self.assertTrue(np.allclose(static_result, dy_result))
-                self.assertTrue(np.allclose(dy_result, expected))
+                np.testing.assert_allclose(static_result,
+                                           expected,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(static_result,
+                                           dy_result,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(dy_result, expected, rtol=1e-05)

                 static_functional = test_static_functional(
                     place, input_np, label_np, reduction)
                 dy_functional = test_dygraph_functional(
                     place, input_np, label_np, reduction)
-                self.assertTrue(np.allclose(static_functional, expected))
-                self.assertTrue(
-                    np.allclose(static_functional, dy_functional))
-                self.assertTrue(np.allclose(dy_functional, expected))
+                np.testing.assert_allclose(static_functional,
+                                           expected,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(static_functional,
+                                           dy_functional,
+                                           rtol=1e-05)
+                np.testing.assert_allclose(dy_functional,
+                                           expected,
+                                           rtol=1e-05)

     def test_SoftMarginLoss_error(self):
         paddle.disable_static()
...
@@ -36,9 +36,9 @@ class TestSoftmax2DAPI(unittest.TestCase):
             m = paddle.nn.Softmax2D()
             out = m(x)
             exe = paddle.static.Executor(self.place)
-            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            res, = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_softmax(self.x_np, self.axis)
-            self.assertTrue(np.allclose(out_ref, res))
+            np.testing.assert_allclose(out_ref, res, rtol=1e-05)

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -46,7 +46,7 @@ class TestSoftmax2DAPI(unittest.TestCase):
         m = paddle.nn.Softmax2D()
         out = m(x)
         out_ref = ref_softmax(self.x_np, self.axis)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
...
@@ -41,14 +41,14 @@ class TestSquareErrorCost(unittest.TestCase):
         place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
         exe = Executor(place)
-        result = exe.run(fluid.default_main_program(),
+        result, = exe.run(fluid.default_main_program(),
                          feed={
                              "input": input_val,
                              "label": label_val
                          },
                          fetch_list=[output])

-        self.assertTrue(np.isclose(np_result, result).all())
+        np.testing.assert_allclose(np_result, result, rtol=1e-05)


 class TestSquareErrorInvalidInput(unittest.TestCase):
...
@@ -71,9 +71,9 @@ class TestTrilIndicesAPICaseStatic(unittest.TestCase):
                                           paddle.static.Program()):
             data1 = paddle.tril_indices(4, 4, -1)
             exe1 = paddle.static.Executor(place)
-            result1 = exe1.run(feed={}, fetch_list=[data1])
+            result1, = exe1.run(feed={}, fetch_list=[data1])
             expected_result1 = np.tril_indices(4, -1, 4)
-            self.assertTrue(np.allclose(result1, expected_result1))
+            np.testing.assert_allclose(result1, expected_result1, rtol=1e-05)


 class TestTrilIndicesAPICaseDygraph(unittest.TestCase):
@@ -121,9 +121,9 @@ class TestTrilIndicesAPICaseDefault(unittest.TestCase):
                                           paddle.static.Program()):
             data = paddle.tril_indices(4, None, 2)
             exe = paddle.static.Executor(paddle.CPUPlace())
-            result = exe.run(feed={}, fetch_list=[data])
+            result, = exe.run(feed={}, fetch_list=[data])
             expected_result = np.tril_indices(4, 2)
-            self.assertTrue(np.allclose(result, expected_result))
+            np.testing.assert_allclose(result, expected_result, rtol=1e-05)

         with fluid.dygraph.base.guard(paddle.CPUPlace()):
             out = paddle.tril_indices(4, None, 2)
...
@@ -264,7 +264,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
         output = self.unsqueeze(input, axis=[1])
         out_np = output.numpy()
         expected_out = np.expand_dims(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

     def test_out_int8(self):
         paddle.disable_static()
@@ -273,7 +273,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
         output = self.unsqueeze(input, axis=[1])
         out_np = output.numpy()
         expected_out = np.expand_dims(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

     def test_out_uint8(self):
         paddle.disable_static()
@@ -282,7 +282,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
         output = self.unsqueeze(input, axis=1)
         out_np = output.numpy()
         expected_out = np.expand_dims(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

     def test_axis_not_list(self):
         paddle.disable_static()
@@ -291,7 +291,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
         output = self.unsqueeze(input, axis=1)
         out_np = output.numpy()
         expected_out = np.expand_dims(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)

     def test_dimension_not_1(self):
         paddle.disable_static()
@@ -299,8 +299,8 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
         input = paddle.to_tensor(input_1)
         output = self.unsqueeze(input, axis=(1, 2))
         out_np = output.numpy()
-        expected_out = np.expand_dims(input_1, axis=1)
-        self.assertTrue(np.allclose(expected_out, out_np))
+        expected_out = np.expand_dims(input_1, axis=(1, 2))
+        np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)


 class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
...
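The final hunk fixes a genuine reference bug that the stricter assertion surfaced: the test unsqueezes two axes, so the numpy reference must pass the same tuple to `np.expand_dims`. Because `np.allclose` broadcasts, a reference that is one dimension short can still compare equal; `assert_allclose` fails fast on the shape mismatch. A sketch with a hypothetical input (the integer cast makes every element zero, a degenerate case where broadcasting hides the bug completely):

```python
import numpy as np

x = np.random.random([5, 10]).astype("int8")  # all zeros after the cast

out = np.expand_dims(x, axis=(1, 2))  # what unsqueeze(axis=(1, 2)) computes
bad_ref = np.expand_dims(x, axis=1)   # the old reference: one axis short

print(out.shape, bad_ref.shape)       # (5, 1, 1, 10) vs (5, 1, 10)
print(np.allclose(bad_ref, out))      # True -- broadcasting masks the bug

good_ref = np.expand_dims(x, axis=(1, 2))
np.testing.assert_allclose(good_ref, out, rtol=1e-05)  # shape-strict, passes
```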