Unverified commit 9107b653, authored by Nyakku Shigure, committed by GitHub

[CodeStyle] use np.testing.assert_allclose instead of self.assertTrue(np.allclose(...)) (part 2) (#45213)

* autofix (get ci log)

* retrigger ci

* fix test_gather_nd_op, wrong expected value in dygraph

* fix test_activation_op, unpack static graph result

* fix test_auc_op, unpack static graph result

* fix test_bce_loss, unpack static graph result

* fix test_bce_with_logits_loss, unpack static graph result

* fix test_cond, unpack static graph result

* fix test_dygraph_weight_norm, wrong numpy reference when `axis=None`

* fix test_einsum, wrong matmul inputs

* fix test_elementwise_heaviside_op, unpack static graph result

* fix test_frac_api, unpack static graph result

* skip test_group_norm_op_v2, probably the wrong numpy reference

* fix test_imperative_double_grad, wrong subscript

* skip test_imperative_tensor_clear_gradient, ???

* skip test_layer_norm_op, probably the wrong numpy reference

* fix test_math_op_patch, unpack static graph results

* fix test_masked_select_op, unpack static graph results

* fix test_mse_loss, unpack static graph results

* fix test_multi_label_soft_margin_loss, unpack static graph results

* fix test_multi_dot_op, unpack static graph results

* fix test_nll_loss, unpack static graph results

* fix test_normalization_wrapper, unpack static graph results

* fix test_pass_builder, unpack static graph results

* fix test_prelu_op, possibly an extra comma

* fix test_psroi_pool_op, unpack static graph results

* fix test_queue, unpack static graph results

* fix test_reorder_lod_tensor, compare an item with a list

* fix test_rrelu_op, unpack static graph results

* fix test_searchsorted_op, unpack static graph results

* fix test_sigmoid_focal_loss, unpack static graph results

* fix test_smooth_l1_loss, unpack static graph results

* fix test_soft_margin_loss, unpack static graph results

* fix test_softmax2d, unpack static graph results

* fix test_square_error_cost, unpack static graph results

* fix test_tril_indices_op, unpack static graph results

* fix test_unsqueeze_op, mismatched numpy reference (axis)

* skip test_layers, `static_rlt` is missing an axis

* fix test_mnist, unpack PredictorTools result (also a list)

* fix test_build_strategy, unpack PredictorTools result

* fix test_mobile_net, unpack PredictorTools result

* fix test_resnet_v2, unpack PredictorTools result

* revert some changes:
  revert test_layers
  revert test_group_norm_op_v2
  revert test_layer_norm_op
  revert test_imperative_tensor_clear_gradient

* fix test_normal, use flatten instead of reshape (PR-CI-Windows-OPENBLAS)

* empty commit, trigger CI
Parent 9556c688
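
The hunks below reduce almost entirely to two recurring patterns. As a minimal, self-contained sketch (a toy program written for this note, not code from the commit): first, `np.testing.assert_allclose` replaces `self.assertTrue(np.allclose(...))` because a failure then reports both arrays, the mismatched elements, and the tolerances instead of a bare "False is not true"; the explicit `rtol=1e-05` mirrors the default rtol of `np.allclose`. Second, `Executor.run` returns a list of fetched arrays, one per `fetch_list` entry; `np.allclose` happily broadcasts over the single-element list, but `assert_allclose` also checks shapes, so the fetched array must be unpacked first.

import numpy as np
import paddle

# Pattern 1: assert_allclose produces a diagnostic failure message;
# rtol=1e-05 matches the default rtol of np.allclose.
actual = np.array([1.0, 2.0], dtype='float32')
np.testing.assert_allclose(actual, np.array([1.0, 2.0], dtype='float32'), rtol=1e-05)

# Pattern 2: exe.run returns a list of fetched arrays, so unpack it
# (note the trailing comma) before comparing against a plain ndarray.
paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
    y = x + 1.0
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
x_np = np.ones([2, 3], dtype='float32')
res, = exe.run(main, feed={'x': x_np}, fetch_list=[y])
np.testing.assert_allclose(res, x_np + 1.0, rtol=1e-05)

The same unpacking applies to `PredictorTools`, whose call result is also a list, which is why the inference helpers below change `out = output()` to `out, = output()`.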
......@@ -45,22 +45,33 @@ class TestResnetWithPass(unittest.TestCase):
st_pre = self.resnet_helper.predict_static(image)
dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
predictor_pre = self.resnet_helper.predict_analysis_inference(image)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
self.assertTrue(np.allclose(predictor_pre, st_pre),
msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
predictor_pre, st_pre))
np.testing.assert_allclose(
dy_pre,
st_pre,
rtol=1e-05,
err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
np.testing.assert_allclose(
dy_jit_pre,
st_pre,
rtol=1e-05,
err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(
predictor_pre,
st_pre,
rtol=1e-05,
err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
predictor_pre, st_pre))
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
np.testing.assert_allclose(
static_loss,
dygraph_loss,
rtol=1e-05,
err_msg='static_loss: {} \n dygraph_loss: {}'.format(
static_loss, dygraph_loss))
self.verify_predict()
def test_in_static_mode_mkldnn(self):
......
......@@ -165,15 +165,21 @@ class TestMNISTWithToStatic(TestMNIST):
def test_mnist_to_static(self):
dygraph_loss = self.train_dygraph()
static_loss = self.train_static()
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(
dygraph_loss,
static_loss,
rtol=1e-05,
err_msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
with _test_eager_guard():
dygraph_loss = self.train_dygraph()
static_loss = self.train_static()
self.assertTrue(np.allclose(dygraph_loss, static_loss),
msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
np.testing.assert_allclose(
dygraph_loss,
static_loss,
rtol=1e-05,
err_msg='dygraph is {}\n static_res is \n{}'.format(
dygraph_loss, static_loss))
def test_mnist_declarative_cpu_vs_mkldnn(self):
dygraph_loss_cpu = self.train_dygraph()
......@@ -182,9 +188,12 @@ class TestMNISTWithToStatic(TestMNIST):
dygraph_loss_mkldnn = self.train_dygraph()
finally:
fluid.set_flags({'FLAGS_use_mkldnn': False})
self.assertTrue(np.allclose(dygraph_loss_cpu, dygraph_loss_mkldnn),
msg='cpu dygraph is {}\n mkldnn dygraph is \n{}'.format(
dygraph_loss_cpu, dygraph_loss_mkldnn))
np.testing.assert_allclose(
dygraph_loss_cpu,
dygraph_loss_mkldnn,
rtol=1e-05,
err_msg='cpu dygraph is {}\n mkldnn dygraph is \n{}'.format(
dygraph_loss_cpu, dygraph_loss_mkldnn))
def train(self, to_static=False):
......@@ -250,15 +259,21 @@ class TestMNISTWithToStatic(TestMNIST):
# load in static mode
static_infer_out = self.jit_load_and_run_inference_static(
model_save_dir, model_filename, params_filename, inputs)
self.assertTrue(np.allclose(gt_out.numpy(), static_infer_out))
np.testing.assert_allclose(gt_out.numpy(),
static_infer_out,
rtol=1e-05)
# load in dygraph mode
dygraph_infer_out = self.jit_load_and_run_inference_dygraph(
model_save_prefix, inputs)
self.assertTrue(np.allclose(gt_out.numpy(), dygraph_infer_out))
np.testing.assert_allclose(gt_out.numpy(),
dygraph_infer_out,
rtol=1e-05)
# load in Paddle-Inference
predictor_infer_out = self.predictor_load_and_run_inference_analysis(
model_save_dir, model_filename, params_filename, inputs)
self.assertTrue(np.allclose(gt_out.numpy(), predictor_infer_out))
np.testing.assert_allclose(gt_out.numpy(),
predictor_infer_out,
rtol=1e-05)
@switch_to_static_graph
def jit_load_and_run_inference_static(self, model_path, model_filename,
......@@ -287,7 +302,7 @@ class TestMNISTWithToStatic(TestMNIST):
params_filename, inputs):
output = PredictorTools(model_path, model_filename, params_filename,
inputs)
out = output()
out, = output()
return out
......
......@@ -556,7 +556,7 @@ def predict_dygraph_jit(args, data):
def predict_analysis_inference(args, data):
output = PredictorTools(args.model_save_dir, args.model_filename,
args.params_filename, [data])
out = output()
out, = output()
return out
......@@ -585,8 +585,11 @@ class TestMobileNet(unittest.TestCase):
def assert_same_loss(self, model_name):
dy_out = self.train(model_name, to_static=False)
st_out = self.train(model_name, to_static=True)
self.assertTrue(np.allclose(dy_out, st_out),
msg="dy_out: {}, st_out: {}".format(dy_out, st_out))
np.testing.assert_allclose(dy_out,
st_out,
rtol=1e-05,
err_msg='dy_out: {}, st_out: {}'.format(
dy_out, st_out))
def assert_same_predict(self, model_name):
self.args.model = model_name
......@@ -602,15 +605,24 @@ class TestMobileNet(unittest.TestCase):
st_pre = predict_static(self.args, image)
dy_jit_pre = predict_dygraph_jit(self.args, image)
predictor_pre = predict_analysis_inference(self.args, image)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
self.assertTrue(np.allclose(predictor_pre, st_pre, atol=1e-5),
msg="inference_pred_res:\n {}\n, st_pre: \n{}.".format(
predictor_pre, st_pre))
np.testing.assert_allclose(
dy_pre,
st_pre,
rtol=1e-05,
err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
np.testing.assert_allclose(
dy_jit_pre,
st_pre,
rtol=1e-05,
err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(
predictor_pre,
st_pre,
rtol=1e-05,
atol=1e-05,
err_msg='inference_pred_res:\n {}\n, st_pre: \n{}.'.format(
predictor_pre, st_pre))
def test_mobile_net(self):
# MobileNet-V1
......
......@@ -339,7 +339,7 @@ class ResNetHelper:
def predict_analysis_inference(self, data):
output = PredictorTools(self.model_save_dir, self.model_filename,
self.params_filename, [data])
out = output()
out, = output()
return out
......@@ -358,22 +358,33 @@ class TestResnet(unittest.TestCase):
st_pre = self.resnet_helper.predict_static(image)
dy_jit_pre = self.resnet_helper.predict_dygraph_jit(image)
predictor_pre = self.resnet_helper.predict_analysis_inference(image)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
self.assertTrue(np.allclose(predictor_pre, st_pre),
msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
predictor_pre, st_pre))
np.testing.assert_allclose(
dy_pre,
st_pre,
rtol=1e-05,
err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
np.testing.assert_allclose(
dy_jit_pre,
st_pre,
rtol=1e-05,
err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(
predictor_pre,
st_pre,
rtol=1e-05,
err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
predictor_pre, st_pre))
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
np.testing.assert_allclose(
static_loss,
dygraph_loss,
rtol=1e-05,
err_msg='static_loss: {} \n dygraph_loss: {}'.format(
static_loss, dygraph_loss))
self.verify_predict()
def test_in_static_mode_mkldnn(self):
......
......@@ -109,9 +109,12 @@ class TestResnet(unittest.TestCase):
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
np.testing.assert_allclose(
static_loss,
dygraph_loss,
rtol=1e-05,
err_msg='static_loss: {} \n dygraph_loss: {}'.format(
static_loss, dygraph_loss))
if __name__ == '__main__':
......
......@@ -118,9 +118,13 @@ class TestResnet(unittest.TestCase):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
# NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here.
self.assertTrue(np.allclose(static_loss, dygraph_loss, atol=1e-3),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
np.testing.assert_allclose(
static_loss,
dygraph_loss,
rtol=1e-05,
atol=0.001,
err_msg='static_loss: {} \n dygraph_loss: {}'.format(
static_loss, dygraph_loss))
if __name__ == '__main__':
......
......@@ -340,7 +340,7 @@ class TestResnet(unittest.TestCase):
def predict_analysis_inference(self, data):
output = PredictorTools(self.model_save_dir, self.model_filename,
self.params_filename, [data])
out = output()
out, = output()
return out
def train(self, to_static):
......@@ -353,22 +353,33 @@ class TestResnet(unittest.TestCase):
st_pre = self.predict_static(image)
dy_jit_pre = self.predict_dygraph_jit(image)
predictor_pre = self.predict_analysis_inference(image)
self.assertTrue(np.allclose(dy_pre, st_pre),
msg="dy_pre:\n {}\n, st_pre: \n{}.".format(
dy_pre, st_pre))
self.assertTrue(np.allclose(dy_jit_pre, st_pre),
msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format(
dy_jit_pre, st_pre))
self.assertTrue(np.allclose(predictor_pre, st_pre),
msg="predictor_pre:\n {}\n, st_pre: \n{}.".format(
predictor_pre, st_pre))
np.testing.assert_allclose(
dy_pre,
st_pre,
rtol=1e-05,
err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre))
np.testing.assert_allclose(
dy_jit_pre,
st_pre,
rtol=1e-05,
err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format(
dy_jit_pre, st_pre))
np.testing.assert_allclose(
predictor_pre,
st_pre,
rtol=1e-05,
err_msg='predictor_pre:\n {}\n, st_pre: \n{}.'.format(
predictor_pre, st_pre))
def test_resnet(self):
static_loss = self.train(to_static=True)
dygraph_loss = self.train(to_static=False)
self.assertTrue(np.allclose(static_loss, dygraph_loss),
msg="static_loss: {} \n dygraph_loss: {}".format(
static_loss, dygraph_loss))
np.testing.assert_allclose(
static_loss,
dygraph_loss,
rtol=1e-05,
err_msg='static_loss: {} \n dygraph_loss: {}'.format(
static_loss, dygraph_loss))
self.verify_predict()
def test_in_static_mode_mkldnn(self):
......
......@@ -534,7 +534,7 @@ class TestSinh(TestActivation):
x = fluid.dygraph.to_variable(np_x)
z = fluid.layers.sinh(x).numpy()
z_expected = np.sinh(np_x)
self.assertTrue(np.allclose(z, z_expected))
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
def test_api(self):
test_data_shape = [11, 17]
......@@ -549,12 +549,12 @@ class TestSinh(TestActivation):
pd_sinh_out = fluid.layers.sinh(data_x)
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())
np_sinh_res = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[pd_sinh_out])
np_sinh_res, = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[pd_sinh_out])
expected_res = np.sinh(input_x)
self.assertTrue(np.allclose(np_sinh_res, expected_res))
np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
def test_backward(self):
test_data_shape = [11, 17]
......@@ -607,7 +607,7 @@ class TestCosh(TestActivation):
x = fluid.dygraph.to_variable(np_x)
z = fluid.layers.cosh(x).numpy()
z_expected = np.cosh(np_x)
self.assertTrue(np.allclose(z, z_expected))
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
def test_api(self):
test_data_shape = [11, 17]
......@@ -622,12 +622,12 @@ class TestCosh(TestActivation):
pd_cosh_out = paddle.cosh(data_x)
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())
np_cosh_res = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[pd_cosh_out])
np_cosh_res, = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[pd_cosh_out])
expected_res = np.cosh(input_x)
self.assertTrue(np.allclose(np_cosh_res, expected_res))
np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
def test_backward(self):
test_data_shape = [11, 17]
......@@ -2261,11 +2261,11 @@ class TestLog2(TestActivation):
out1 = paddle.log2(data_x)
exe = paddle.static.Executor(place=fluid.CPUPlace())
exe.run(paddle.static.default_startup_program())
res1 = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
res1, = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log2(input_x)
self.assertTrue(np.allclose(res1, expected_res))
np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
# dygraph
with fluid.dygraph.guard():
......@@ -2274,7 +2274,7 @@ class TestLog2(TestActivation):
z = paddle.log2(data_x)
np_z = z.numpy()
z_expected = np.array(np.log2(np_x))
self.assertTrue(np.allclose(np_z, z_expected))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
class TestLog10(TestActivation):
......@@ -2314,11 +2314,11 @@ class TestLog10(TestActivation):
out1 = paddle.log10(data_x)
exe = paddle.static.Executor(place=paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
res1 = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
res1, = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log10(input_x)
self.assertTrue(np.allclose(res1, expected_res))
np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
# dygraph
with fluid.dygraph.guard():
......@@ -2327,7 +2327,7 @@ class TestLog10(TestActivation):
z = paddle.log10(data_x)
np_z = z.numpy()
z_expected = np.array(np.log10(np_x))
self.assertTrue(np.allclose(np_z, z_expected))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
class TestLog1p(TestActivation):
......@@ -2361,11 +2361,11 @@ class TestLog1p(TestActivation):
out1 = paddle.log1p(data_x)
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res1 = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
res1, = exe.run(fluid.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log1p(input_x)
self.assertTrue(np.allclose(res1, expected_res))
np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
# dygraph
with fluid.dygraph.guard():
......@@ -2374,7 +2374,7 @@ class TestLog1p(TestActivation):
z = paddle.log1p(data_x)
np_z = z.numpy()
z_expected = np.array(np.log1p(np_x))
self.assertTrue(np.allclose(np_z, z_expected))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
class TestSquare(TestActivation):
......
......@@ -130,14 +130,14 @@ class TestAucAPI(unittest.TestCase):
y = np.array([0, 0, 1, 0]).astype('int64')
z = np.array([1, 1, 1, 1]).astype('float32')
output = exe.run(feed={
output, = exe.run(feed={
"input": x,
"label": y,
"ins_tag_weight": z
},
fetch_list=[result[0]])
fetch_list=[result[0]])
auc_np = np.array([0.66666667]).astype("float32")
self.assertTrue(np.allclose(output, auc_np))
np.testing.assert_allclose(output, auc_np, rtol=1e-05)
class TestAucOpError(unittest.TestCase):
......
......@@ -43,16 +43,16 @@ def test_static_layer(place,
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
res = bce_loss(input, label)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
} if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
} if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
return static_result
......@@ -83,16 +83,16 @@ def test_static_functional(place,
label,
reduction=reduction)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
} if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
} if weight_np is None else {
"input": input_np,
"label": label_np,
"weight": weight_np
},
fetch_list=[res])
return static_result
......@@ -171,16 +171,20 @@ class TestBCELoss(unittest.TestCase):
dy_result = test_dygraph_layer(place, input_np, label_np,
reduction)
expected = calc_bceloss(input_np, label_np, reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static_functional(
place, input_np, label_np, reduction)
dy_functional = test_dygraph_functional(place, input_np,
label_np, reduction)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_BCELoss_weight(self):
input_np = np.random.uniform(0.1, 0.8,
......@@ -205,9 +209,9 @@ class TestBCELoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static_functional(place,
input_np,
label_np,
......@@ -218,9 +222,11 @@ class TestBCELoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_BCELoss_error(self):
paddle.disable_static()
......
......@@ -79,7 +79,7 @@ def test_static(place,
else:
res = call_bce_layer(logit, label, weight, reduction, pos_weight)
exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res])
return static_result
......@@ -152,9 +152,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction=reduction)
expected = calc_bce_with_logits_loss(logit_np, label_np,
reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -173,10 +173,16 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
self.assertTrue(np.allclose(eager_functional, expected))
np.testing.assert_allclose(static_functional,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
np.testing.assert_allclose(eager_functional,
expected,
rtol=1e-05)
def test_BCEWithLogitsLoss_weight(self):
logit_np = np.random.uniform(0.1, 0.8,
......@@ -201,9 +207,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
label_np,
reduction,
weight_np=weight_np)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -216,9 +222,11 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
weight_np=weight_np,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_BCEWithLogitsLoss_pos_weight(self):
logit_np = np.random.uniform(0.1, 0.8,
......@@ -236,9 +244,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction, pos_weight_np)
expected = calc_bce_with_logits_loss(logit_np, label_np, reduction,
weight_np, pos_weight_np)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place,
logit_np,
label_np,
......@@ -253,9 +261,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
reduction,
pos_weight_np,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_BCEWithLogitsLoss_error(self):
paddle.disable_static()
......
......@@ -62,9 +62,10 @@ class TestCondInputOutput(unittest.TestCase):
place = fluid.CUDAPlace(
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=[out.name])
self.assertTrue(
np.allclose(np.asarray(ret), np.full((3, 2), -1, np.int32)))
ret, = exe.run(main_program, fetch_list=[out.name])
np.testing.assert_allclose(np.asarray(ret),
np.full((3, 2), -1, np.int32),
rtol=1e-05)
def test_return_var_tuple(self):
"""
......@@ -103,10 +104,12 @@ class TestCondInputOutput(unittest.TestCase):
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=out)
self.assertTrue(
np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32)))
self.assertTrue(
np.allclose(np.asarray(ret[1]), np.full((2, 3), True, bool)))
np.testing.assert_allclose(np.asarray(ret[0]),
np.full((1, 2), 1, np.int32),
rtol=1e-05)
np.testing.assert_allclose(np.asarray(ret[1]),
np.full((2, 3), True, bool),
rtol=1e-05)
def test_pass_and_modify_var(self):
"""
......@@ -142,12 +145,12 @@ class TestCondInputOutput(unittest.TestCase):
exe = fluid.Executor(place)
for feed_i in range(5):
expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i
ret = exe.run(main_program,
feed={'i': np.full((1), feed_i, np.int32)},
fetch_list=[a])
self.assertTrue(
np.allclose(np.asarray(ret),
np.full((3, 2, 1), expected_a, np.int32)))
ret, = exe.run(main_program,
feed={'i': np.full((1), feed_i, np.int32)},
fetch_list=[a])
np.testing.assert_allclose(np.asarray(ret),
np.full((3, 2, 1), expected_a, np.int32),
rtol=1e-05)
def test_return_none(self):
"""
......
......@@ -49,7 +49,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
ndims = len(shape)
shape_numel = reduce(lambda x, y: x * y, shape)
if dim == -1:
return np.linalg.norm(w, axis=None, keepdims=True)
return np.linalg.norm(w, axis=None, keepdims=True).flatten()
elif dim == 0:
tile_shape = list(w.shape)
tile_shape[0] = 1
......@@ -132,7 +132,10 @@ class TestDygraphWeightNorm(unittest.TestCase):
expect_output = self.weight_normalize(before_weight, self.dim)
for expect, actual in zip(expect_output, self.actual_outputs):
self.assertTrue(np.allclose(np.array(actual), expect, atol=0.001))
np.testing.assert_allclose(np.array(actual),
expect,
rtol=1e-05,
atol=0.001)
class TestDygraphWeightNormCase1(TestDygraphWeightNorm):
......
......@@ -336,10 +336,13 @@ class TestNumpyTests(unittest.TestCase):
def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8):
error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}'
self.assertTrue(
np.allclose(actual, expect, rtol=rtol, atol=atol),
error_msg.format(paddle.get_device(), expect, actual,
self.__class__.__name__))
np.testing.assert_allclose(actual,
expect,
rtol=rtol,
atol=atol,
err_msg=error_msg.format(
paddle.get_device(), expect, actual,
self.__class__.__name__))
def check_output(self, eqn, *ops):
expect = np.einsum(eqn, *ops)
......@@ -425,7 +428,7 @@ class TestNumpyTests(unittest.TestCase):
p = np.ones((1, 5)) / 2
q = np.ones((5, 5)) / 2
self.check_output("...ij,...jk->...ik", p, p)
self.check_output("...ij,...jk->...ik", p, p.T)
self.check_output("...ij,...jk->...ik", p, q)
x = np.eye(2).astype('float')
......
......@@ -351,10 +351,13 @@ class TestNumpyTests(unittest.TestCase):
def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8):
error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}'
self.assertTrue(
np.allclose(actual, expect, rtol=rtol, atol=atol),
error_msg.format(paddle.get_device(), expect, actual,
self.__class__.__name__))
np.testing.assert_allclose(actual,
expect,
rtol=rtol,
atol=atol,
err_msg=error_msg.format(
paddle.get_device(), expect, actual,
self.__class__.__name__))
def check_output(self, eqn, *ops):
expect = np.einsum(eqn, *ops)
......
......@@ -104,15 +104,15 @@ class TestHeavisideAPI_float64(unittest.TestCase):
out = paddle.heaviside(x, y)
exe = paddle.static.Executor(place=place)
res = exe.run(prog,
feed={
f"x_{self.dtype}": self.x_np,
f"y_{self.dtype}": self.y_np
},
fetch_list=out,
use_prune=True)
res, = exe.run(prog,
feed={
f"x_{self.dtype}": self.x_np,
f"y_{self.dtype}": self.y_np
},
fetch_list=out,
use_prune=True)
self.assertTrue(np.allclose(res, self.out_np))
np.testing.assert_allclose(res, self.out_np, rtol=1e-05)
def test_dygraph(self):
for use_cuda in ([False, True]
......@@ -122,7 +122,7 @@ class TestHeavisideAPI_float64(unittest.TestCase):
result = paddle.heaviside(paddle.to_tensor(self.x_np),
paddle.to_tensor(self.y_np))
self.assertTrue(np.allclose(result.numpy(), self.out_np))
np.testing.assert_allclose(result.numpy(), self.out_np, rtol=1e-05)
class TestHeavisideAPI_float32(TestHeavisideAPI_float64):
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
def ref_frac(x):
return x - np.trunc(x)
class TestFracAPI(unittest.TestCase):
"""Test Frac API"""
def set_dtype(self):
self.dtype = 'float64'
def setUp(self):
self.set_dtype()
self.x_np = np.random.uniform(-3, 3, [2, 3]).astype(self.dtype)
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_api_static(self):
paddle.enable_static()
with program_guard(Program()):
input = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.frac(input)
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_frac(self.x_np)
self.assertTrue(np.allclose(out_ref, res))
def test_api_dygraph(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.frac(x)
out_ref = ref_frac(self.x_np)
self.assertTrue(np.allclose(out_ref, out.numpy()))
def test_api_eager(self):
paddle.disable_static(self.place)
with _test_eager_guard():
x_tensor = paddle.to_tensor(self.x_np)
out = paddle.frac(x_tensor)
out_ref = ref_frac(self.x_np)
self.assertTrue(np.allclose(out_ref, out.numpy()))
paddle.enable_static()
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_api_dygraph()
class TestFracInt32(TestFracAPI):
"""Test Frac API with data type int32"""
def set_dtype(self):
self.dtype = 'int32'
class TestFracInt64(TestFracAPI):
"""Test Frac API with data type int64"""
def set_dtype(self):
self.dtype = 'int64'
class TestFracFloat32(TestFracAPI):
"""Test Frac API with data type float32"""
def set_dtype(self):
self.dtype = 'float32'
class TestFracError(unittest.TestCase):
"""Test Frac Error"""
def setUp(self):
self.x_np = np.random.uniform(-3, 3, [2, 3]).astype('int16')
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_error(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [5, 5], 'bool')
self.assertRaises(TypeError, paddle.frac, x)
def test_dygraph_error(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np, dtype='int16')
self.assertRaises(TypeError, paddle.frac, x)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
def ref_frac(x):
return x - np.trunc(x)
class TestFracAPI(unittest.TestCase):
"""Test Frac API"""
def set_dtype(self):
self.dtype = 'float64'
def setUp(self):
self.set_dtype()
self.x_np = np.random.uniform(-3, 3, [2, 3]).astype(self.dtype)
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_api_static(self):
paddle.enable_static()
with program_guard(Program()):
input = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.frac(input)
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
res, = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_frac(self.x_np)
np.testing.assert_allclose(out_ref, res, rtol=1e-05)
def test_api_dygraph(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.frac(x)
out_ref = ref_frac(self.x_np)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
def test_api_eager(self):
paddle.disable_static(self.place)
with _test_eager_guard():
x_tensor = paddle.to_tensor(self.x_np)
out = paddle.frac(x_tensor)
out_ref = ref_frac(self.x_np)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_api_dygraph()
class TestFracInt32(TestFracAPI):
"""Test Frac API with data type int32"""
def set_dtype(self):
self.dtype = 'int32'
class TestFracInt64(TestFracAPI):
"""Test Frac API with data type int64"""
def set_dtype(self):
self.dtype = 'int64'
class TestFracFloat32(TestFracAPI):
"""Test Frac API with data type float32"""
def set_dtype(self):
self.dtype = 'float32'
class TestFracError(unittest.TestCase):
"""Test Frac Error"""
def setUp(self):
self.x_np = np.random.uniform(-3, 3, [2, 3]).astype('int16')
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_error(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [5, 5], 'bool')
self.assertRaises(TypeError, paddle.frac, x)
def test_dygraph_error(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np, dtype='int16')
self.assertRaises(TypeError, paddle.frac, x)
if __name__ == '__main__':
unittest.main()
......@@ -248,7 +248,7 @@ class TestGatherNdAPI2(unittest.TestCase):
},
fetch_list=[out])
expected_output = np.array([[3, 4]])
self.assertTrue(np.allclose(result, expected_output))
np.testing.assert_allclose(result, expected_output, rtol=1e-05)
def test_imperative(self):
paddle.disable_static()
......@@ -258,8 +258,8 @@ class TestGatherNdAPI2(unittest.TestCase):
index = fluid.dygraph.to_variable(index_1)
output = paddle.fluid.layers.gather(input, index)
output_np = output.numpy()
expected_output = np.array([3, 4])
self.assertTrue(np.allclose(output_np, expected_output))
expected_output = np.array([[3, 4]])
np.testing.assert_allclose(output_np, expected_output, rtol=1e-05)
paddle.enable_static()
......
......@@ -746,9 +746,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
np_div = np.array([[0, 2, 3], [2, 8, 10], [1, 4, 5]], dtype="float32")
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_res, paddle_res))
def test_compute_all_with_mean(self):
paddle.disable_static()
......@@ -774,9 +778,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
np_div = np.array([[0, 2, 3], [1, 4, 5], [1, 4, 5]], dtype="float32")
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_res, paddle_res))
def test_compute_all_with_max(self):
paddle.disable_static()
......@@ -801,11 +809,15 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
np_mul = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32")
np_div = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]], dtype="float32")
self.assertTrue(np.allclose(np_sub, res_sub, atol=1e-6))
np.testing.assert_allclose(np_sub, res_sub, rtol=1e-05, atol=1e-06)
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_res, paddle_res))
def test_compute_all_with_max_fp16(self):
paddle.disable_static()
......@@ -840,13 +852,20 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
np_div = np.array([[0, 2, 3], [2, 6, 7], [1, 4, 5]],
dtype="float16")
self.assertTrue(np.allclose(np_sub, res_sub, atol=1e-6))
np.testing.assert_allclose(np_sub,
res_sub,
rtol=1e-05,
atol=1e-06)
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6),
"two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg=
'two value is {}\n{}, check diff!'
.format(np_res, paddle_res))
def test_compute_all_with_min(self):
paddle.disable_static()
......@@ -872,9 +891,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
np_div = np.array([[0, 2, 3], [0, 2, 3], [1, 4, 5]], dtype="float32")
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_res, paddle_res))
def test_compute_all_with_min_fp16(self):
paddle.disable_static()
......@@ -910,10 +933,14 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6),
"two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg=
'two value is {}\n{}, check diff!'
.format(np_res, paddle_res))
def test_reshape_lhs_rhs(self):
paddle.disable_static()
......@@ -927,9 +954,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
"add", "min")
np_add = np.array([[1, 3, 4], [1, 3, 4], [2, 5, 6]],
dtype="float16").reshape([3, 3, 1])
self.assertTrue(
np.allclose(np_add, res_add, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_add, res_add))
np.testing.assert_allclose(
np_add,
res_add,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_add, res_add))
def test_out_size_tensor_static(self):
paddle.enable_static()
......@@ -962,9 +993,13 @@ class API_GeometricSendUERecvTest(unittest.TestCase):
'out_size': data5,
},
fetch_list=[res_sum])
self.assertTrue(
np.allclose(np_sum, ret[0], atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_sum, ret[0]))
np.testing.assert_allclose(
np_sum,
ret[0],
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_sum, ret[0]))
def test_api_eager_dygraph(self):
with _test_eager_guard():
......
......@@ -198,9 +198,13 @@ class API_GeometricSendUVTest(unittest.TestCase):
dtype="float32")
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div], res):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'.
format(np_res, paddle_res))
def test_compute_all_static(self):
paddle.enable_static()
......@@ -256,9 +260,13 @@ class API_GeometricSendUVTest(unittest.TestCase):
fetch_list=[res_add, res_sub, res_mul, res_div])
for np_res, paddle_res in zip([np_add, np_sub, np_mul, np_div],
ret):
self.assertTrue(
np.allclose(np_res, paddle_res, atol=1e-6), "two value is\
{}\n{}, check diff!".format(np_res, paddle_res))
np.testing.assert_allclose(
np_res,
paddle_res,
rtol=1e-05,
atol=1e-06,
err_msg='two value is {}\n{}, check diff!'
.format(np_res, paddle_res))
def test_api_eager_dygraph(self):
with _test_eager_guard():
......
......@@ -61,7 +61,7 @@ class TestEagerGrad(TestCase):
# stop_gradient = !create_graph, create_graph default false
self.assertEqual(dx[0].stop_gradient, True)
self.assertTrue(np.allclose(dx[0].numpy(), expected_dx[0]))
np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)
def test_simple_example_eager_grad(self):
with _test_eager_guard():
......@@ -83,7 +83,7 @@ class TestEagerGrad(TestCase):
dx = fluid.dygraph.grad(out, [x, z], allow_unused=True)
dout = np.ones_like(np_y)
expected_dx = np.matmul(dout, np.transpose(np_y))
self.assertTrue(np.allclose(dx[0].numpy(), expected_dx[0]))
np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05)
# stop_gradient = !create_graph, create_graph default false
self.assertEqual(dx[0].stop_gradient, True)
# x is unused input in the graph
......
......@@ -74,7 +74,7 @@ class TestMaskedSelectAPI(unittest.TestCase):
mask = paddle.to_tensor(np_mask)
out = paddle.masked_select(x, mask)
np_out = np_masked_select(np_x, np_mask)
self.assertEqual(np.allclose(out.numpy(), np_out), True)
np.testing.assert_allclose(out.numpy(), np_out, rtol=1e-05)
paddle.enable_static()
def test_static_mode(self):
......@@ -89,13 +89,13 @@ class TestMaskedSelectAPI(unittest.TestCase):
exe = paddle.static.Executor(place=paddle.CPUPlace())
res = exe.run(paddle.static.default_main_program(),
feed={
"x": np_x,
"mask": np_mask
},
fetch_list=[out])
self.assertEqual(np.allclose(res, np_out), True)
res, = exe.run(paddle.static.default_main_program(),
feed={
"x": np_x,
"mask": np_mask
},
fetch_list=[out])
np.testing.assert_allclose(res, np_out, rtol=1e-05)
class TestMaskedSelectError(unittest.TestCase):
......
......@@ -41,11 +41,11 @@ class TestMathOpPatches(unittest.TestCase):
b_np, c_np, d_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b, c, d])
self.assertTrue(np.allclose(a_np + 10, b_np))
np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)
ab_np = np.concatenate([a_np, b_np], axis=1)
self.assertTrue(np.allclose(ab_np + 10, c_np))
np.testing.assert_allclose(ab_np + 10, c_np, rtol=1e-05)
d_expected = ab_np + np.concatenate([a_np, a_np], axis=1)
self.assertTrue(np.allclose(d_expected, d_np))
np.testing.assert_allclose(d_expected, d_np, rtol=1e-05)
@prog_scope()
def test_radd_scalar(self):
......@@ -57,7 +57,7 @@ class TestMathOpPatches(unittest.TestCase):
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(a_np + 10, b_np))
np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)
@prog_scope()
def test_sub_scalar(self):
......@@ -66,10 +66,10 @@ class TestMathOpPatches(unittest.TestCase):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(a_np - 10, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05)
@prog_scope()
def test_radd_scalar(self):
......@@ -78,10 +78,10 @@ class TestMathOpPatches(unittest.TestCase):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(10 - a_np, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(10 - a_np, b_np, rtol=1e-05)
@prog_scope()
def test_mul_scalar(self):
......@@ -90,10 +90,10 @@ class TestMathOpPatches(unittest.TestCase):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(a_np * 10, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(a_np * 10, b_np, rtol=1e-05)
@prog_scope()
def test_rmul_scalar(self):
......@@ -102,10 +102,10 @@ class TestMathOpPatches(unittest.TestCase):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(10 * a_np, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(10 * a_np, b_np, rtol=1e-05)
@prog_scope()
def test_div_scalar(self):
......@@ -114,10 +114,10 @@ class TestMathOpPatches(unittest.TestCase):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(a_np / 10, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(a_np / 10, b_np, rtol=1e-05)
@prog_scope()
def test_rdiv_scalar(self):
......@@ -127,10 +127,10 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(10 / a_np, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(10 / a_np, b_np, rtol=1e-05)
@prog_scope()
def test_div_two_tensor(self):
......@@ -141,13 +141,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2
c_np = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
self.assertTrue(np.allclose(a_np / b_np, c_np))
c_np, = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
np.testing.assert_allclose(a_np / b_np, c_np, rtol=1e-05)
@prog_scope()
def test_mul_two_tensor(self):
......@@ -158,13 +158,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32')
c_np = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
self.assertTrue(np.allclose(a_np * b_np, c_np))
c_np, = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
np.testing.assert_allclose(a_np * b_np, c_np, rtol=1e-05)
@prog_scope()
def test_add_two_tensor(self):
......@@ -175,13 +175,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32')
c_np = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
self.assertTrue(np.allclose(a_np + b_np, c_np))
c_np, = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
np.testing.assert_allclose(a_np + b_np, c_np, rtol=1e-05)
@prog_scope()
def test_sub_two_tensor(self):
......@@ -192,13 +192,13 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.random(size=[10, 1]).astype('float32')
b_np = np.random.random(size=[10, 1]).astype('float32')
c_np = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
self.assertTrue(np.allclose(a_np - b_np, c_np))
c_np, = exe.run(fluid.default_main_program(),
feed={
"a": a_np,
'b': b_np
},
fetch_list=[c])
np.testing.assert_allclose(a_np - b_np, c_np, rtol=1e-05)
@prog_scope()
def test_integer_div(self):
......@@ -212,7 +212,7 @@ class TestMathOpPatches(unittest.TestCase):
fetch_list=[b])
b_np_actual = (a_np / 7).astype('float32')
self.assertTrue(np.allclose(b_np, b_np_actual))
np.testing.assert_allclose(b_np, b_np_actual, rtol=1e-05)
@prog_scope()
def test_equal(self):
......@@ -266,10 +266,10 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(-a_np, b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(-a_np, b_np, rtol=1e-05)
@prog_scope()
def test_astype(self):
......@@ -279,10 +279,10 @@ class TestMathOpPatches(unittest.TestCase):
exe = fluid.Executor(place)
a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64')
b_np = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
self.assertTrue(np.allclose(a_np.astype('float32'), b_np))
b_np, = exe.run(fluid.default_main_program(),
feed={"a": a_np},
fetch_list=[b])
np.testing.assert_allclose(a_np.astype('float32'), b_np, rtol=1e-05)
def test_bitwise_and(self):
x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32")
......@@ -384,13 +384,13 @@ class TestMathOpPatches(unittest.TestCase):
b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32')
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
c_np = exe.run(paddle.static.default_main_program(),
feed={
"a": a_np,
"b": b_np
},
fetch_list=[c])
self.assertTrue(np.allclose(a_np @ b_np, c_np))
c_np, = exe.run(paddle.static.default_main_program(),
feed={
"a": a_np,
"b": b_np
},
fetch_list=[c])
np.testing.assert_allclose(a_np @ b_np, c_np, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -40,12 +40,12 @@ class TestMseLoss(unittest.TestCase):
if core.is_compiled_with_cuda() else [False]):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
result = exe.run(fluid.default_main_program(),
feed={
"input": input_val,
"label": label_val
},
fetch_list=[output])
result, = exe.run(fluid.default_main_program(),
feed={
"input": input_val,
"label": label_val
},
fetch_list=[output])
np.testing.assert_allclose(np_result, result, rtol=1e-05)
......@@ -91,12 +91,12 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label)
exe = fluid.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
with fluid.dygraph.guard():
mse_loss = paddle.nn.loss.MSELoss()
......@@ -106,9 +106,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np
expected = np.mean(sub * sub)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
def test_NNMseLoss_sum(self):
......@@ -131,12 +131,12 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label)
exe = fluid.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
with fluid.dygraph.guard():
mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
......@@ -146,9 +146,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np
expected = np.sum(sub * sub)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
def test_NNMseLoss_none(self):
......@@ -171,12 +171,12 @@ class TestNNMseLoss(unittest.TestCase):
ret = mse_loss(input, label)
exe = fluid.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[ret])
with fluid.dygraph.guard():
mse_loss = paddle.nn.loss.MSELoss(reduction='none')
......@@ -186,9 +186,9 @@ class TestNNMseLoss(unittest.TestCase):
sub = input_np - label_np
expected = (sub * sub)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
......@@ -214,12 +214,12 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place)
exe.run(startup_prog)
static_result = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
static_result, = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
......@@ -229,9 +229,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np
expected = np.mean(sub * sub)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
def test_NNFunctionalMseLoss_sum(self):
......@@ -254,12 +254,12 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place)
exe.run(startup_prog)
static_result = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
static_result, = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
......@@ -269,9 +269,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np
expected = np.sum(sub * sub)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
def test_NNFunctionalMseLoss_none(self):
......@@ -294,12 +294,12 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
exe = paddle.static.Executor(place)
exe.run(startup_prog)
static_result = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
static_result, = exe.run(prog,
feed={
"input": input_np,
"target": target_np
},
fetch_list=[mse_loss])
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(paddle.to_tensor(input_np),
......@@ -309,9 +309,9 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
sub = input_np - target_np
expected = sub * sub
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
self.assertTrue(dy_result.shape, [1])
......
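The recurring `static_result, = exe.run(...)` change in the hunks above is the heart of these fixes: `Executor.run` returns a list of fetched arrays, and `np.allclose` happily broadcasts a one-element list against the expected array, while `np.testing.assert_allclose` rejects the shape mismatch. A minimal standalone sketch of the difference (shapes are illustrative, not taken from the tests):

import numpy as np

fetched = [np.zeros((2, 3))]   # Executor.run returns a list of fetched arrays
expected = np.zeros((2, 3))

np.allclose(fetched, expected)                    # True: the list becomes shape (1, 2, 3) and broadcasts
# np.testing.assert_allclose(fetched, expected)   # raises: shapes (1, 2, 3) and (2, 3) mismatch

static_result, = fetched                          # unpack the single fetch target first
np.testing.assert_allclose(static_result, expected, rtol=1e-05)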
......@@ -256,16 +256,20 @@ class APITestMultiDot(unittest.TestCase):
exe = paddle.static.Executor(paddle.CPUPlace())
data1 = np.random.rand(3, 2).astype("float64")
data2 = np.random.rand(2, 3).astype("float64")
np_res = exe.run(feed={
np_res, = exe.run(feed={
'x0': data1,
'x1': data2
},
fetch_list=[result])
fetch_list=[result])
expected_result = np.linalg.multi_dot([data1, data2])
self.assertTrue(
np.allclose(np_res, expected_result, atol=1e-5), "two value is\
{}\n{}, check diff!".format(np_res, expected_result))
np.testing.assert_allclose(
np_res,
expected_result,
rtol=1e-05,
atol=1e-05,
err_msg='two value is {}\n{}, check diff!'.format(
np_res, expected_result))
def test_dygraph_without_out(self):
paddle.disable_static()
......@@ -276,7 +280,7 @@ class APITestMultiDot(unittest.TestCase):
data2 = paddle.to_tensor(input_array2)
out = paddle.linalg.multi_dot([data1, data2])
expected_result = np.linalg.multi_dot([input_array1, input_array2])
self.assertTrue(np.allclose(expected_result, out.numpy()))
np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
......
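Note the explicit `rtol=1e-05` carried into every converted assertion: `np.allclose` defaults to `rtol=1e-05, atol=1e-08`, whereas `np.testing.assert_allclose` defaults to `rtol=1e-07, atol=0`, so dropping the argument would silently tighten the tolerance. A small sketch with made-up values:

import numpy as np

a, b = 1.0, 1.0 + 2e-06

np.allclose(a, b)                             # True under np.allclose defaults (rtol=1e-05, atol=1e-08)
np.testing.assert_allclose(a, b, rtol=1e-05)  # passes: the old tolerance made explicit
# np.testing.assert_allclose(a, b)            # would raise: defaults are rtol=1e-07, atol=0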
......@@ -86,7 +86,7 @@ def test_static(place,
reduction=reduction)
exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
static_result, = exe.run(prog, feed=feed_dict, fetch_list=[res])
return static_result
......@@ -164,9 +164,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
input_np=input,
label_np=label,
reduction=reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place=place,
input_np=input,
label_np=label,
......@@ -177,9 +177,13 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
label_np=label,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_MultiLabelSoftMarginLoss_error(self):
paddle.disable_static()
......@@ -217,9 +221,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
label_np=label,
weight_np=weight,
reduction=reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result, expected, rtol=1e-05)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static(place=place,
input_np=input,
label_np=label,
......@@ -232,9 +236,9 @@ class TestMultiLabelMarginLoss(unittest.TestCase):
weight=weight,
reduction=reduction,
functional=True)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional, expected, rtol=1e-05)
np.testing.assert_allclose(static_functional, dy_functional, rtol=1e-05)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)
def test_MultiLabelSoftMarginLoss_dimension(self):
paddle.disable_static()
......
......@@ -134,12 +134,12 @@ class TestNormalAPI(unittest.TestCase):
ret = ret.flatten().reshape([self.repeat_num, -1])
mean = np.mean(ret, axis=0)
std = np.std(ret, axis=0)
mean_ref=self.mean.reshape([1, -1]) \
mean_ref=self.mean.flatten() \
if isinstance(self.mean, np.ndarray) else self.mean
std_ref=self.std.reshape([1, -1]) \
std_ref=self.std.flatten() \
if isinstance(self.std, np.ndarray) else self.std
self.assertTrue(np.allclose(mean_ref, mean, 0.2, 0.2))
self.assertTrue(np.allclose(std_ref, std, 0.2, 0.2))
np.testing.assert_allclose(mean_ref, mean, rtol=0.2, atol=0.2)
np.testing.assert_allclose(std_ref, std, rtol=0.2, atol=0.2)
class TestNormalAPI_mean_is_tensor(TestNormalAPI):
......
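The `test_normal` hunk swaps `reshape([1, -1])` for `flatten()`: the statistics it compares against come from `np.mean(ret, axis=0)` and so have shape `(n,)`, and the old `(1, n)` reference appears to have relied on `np.allclose` broadcasting, which the stricter shape check rejects. A sketch of the shape issue, assuming a 4-element reference:

import numpy as np

mean = np.zeros(4)                  # np.mean(ret, axis=0) yields shape (4,)
ref = np.zeros((2, 2))

np.allclose(ref.reshape([1, -1]), mean)     # True, but only via broadcasting (1, 4) against (4,)
# np.testing.assert_allclose(ref.reshape([1, -1]), mean)  # raises: shape mismatch
np.testing.assert_allclose(ref.flatten(), mean, rtol=0.2, atol=0.2)  # both sides are (4,)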
......@@ -54,10 +54,10 @@ class TestNormalization(unittest.TestCase):
self.set_inputs(place)
exe = fluid.Executor(place)
output = exe.run(fluid.default_main_program(),
feed=self.inputs,
fetch_list=self.fetch_list,
return_numpy=True)
output, = exe.run(fluid.default_main_program(),
feed=self.inputs,
fetch_list=self.fetch_list,
return_numpy=True)
self.op_output = output
def set_inputs(self, place):
......@@ -91,7 +91,10 @@ class TestNormalization(unittest.TestCase):
expect_output = self.l2_normalize(self.data, axis, epsilon)
# check output
self.assertTrue(np.allclose(self.op_output, expect_output, atol=0.001))
np.testing.assert_allclose(self.op_output,
expect_output,
rtol=1e-05,
atol=0.001)
if __name__ == '__main__':
......
......@@ -60,9 +60,9 @@ class TestPassBuilder(unittest.TestCase):
test_loss, = exe.run(test_cp,
fetch_list=[loss.name],
feed=feed_dict)
train_loss = exe.run(train_cp,
fetch_list=[loss.name],
feed=feed_dict)
train_loss, = exe.run(train_cp,
fetch_list=[loss.name],
feed=feed_dict)
avg_test_loss_val = np.array(test_loss).mean()
if math.isnan(float(avg_test_loss_val)):
......@@ -72,10 +72,13 @@ class TestPassBuilder(unittest.TestCase):
if math.isnan(float(avg_train_loss_val)):
sys.exit("got NaN loss, training failed.")
self.assertTrue(
np.allclose(train_loss, test_loss,
atol=1e-8), "Train loss: " + str(train_loss) +
"\n Test loss:" + str(test_loss))
np.testing.assert_allclose(train_loss,
test_loss,
rtol=1e-05,
atol=1e-08,
err_msg='Train loss: ' +
str(train_loss) + '\n Test loss:' +
str(test_loss))
def test_parallel_testing_with_new_strategy(self):
build_strategy = fluid.BuildStrategy()
......
......@@ -32,7 +32,7 @@ def ref_prelu(x, weight):
neg_indices = x <= 0
assert x.shape == neg_indices.shape
x_t[neg_indices] = (x_t * weight)[neg_indices]
return (x_t, )
return x_t
def ref_prelu_nn(x, num_parameters, init):
......@@ -61,7 +61,7 @@ class TestFunctionalPReluAPI(unittest.TestCase):
},
fetch_list=[out])
out_ref = ref_prelu(self.x_np, weight_np)
self.assertEqual(np.allclose(out_ref, res[0]), True)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def dygraph_check(self, weight_np):
paddle.disable_static(self.place)
......@@ -69,7 +69,7 @@ class TestFunctionalPReluAPI(unittest.TestCase):
weight = paddle.to_tensor(weight_np)
out = F.prelu(x, weight)
out_ref = ref_prelu(self.x_np, weight_np)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
def test_static_api(self):
......@@ -125,7 +125,7 @@ class TestNNPReluAPI(unittest.TestCase):
feed={'X': self.x_np},
fetch_list=[out])
out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, res[0]), True)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
......@@ -134,32 +134,32 @@ class TestNNPReluAPI(unittest.TestCase):
m = paddle.nn.PReLU()
out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(num_parameters=self.x_np.shape[1])
out = m(x)
out_ref = ref_prelu_nn(self.x_np, self.x_np.shape[1], 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(init=0.5)
out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight"))
out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.25)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.5)))
out = m(x)
out_ref = ref_prelu_nn(self.x_np, 1, 0.5)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
......
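The `ref_prelu` fix above removes what looks like a stray trailing comma: `return (x_t, )` produced a one-element tuple rather than the array, which `np.allclose` again papered over by treating the tuple as an extra leading axis. In isolation:

def ref_buggy(x):
    return (x, )   # trailing comma: a 1-tuple, compared as shape (1, *x.shape)

def ref_fixed(x):
    return x       # the array itself, so assert_allclose sees matching shapes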
......@@ -334,13 +334,13 @@ class TestPSROIPoolStaticAPI(unittest.TestCase):
exe = paddle.static.Executor(place)
boxes_lod_data = paddle.fluid.create_lod_tensor(
self.boxes, [[1, 2]], place)
out_res = exe.run(paddle.static.default_main_program(),
feed={
'x': self.x,
'boxes': boxes_lod_data
},
fetch_list=[out.name])
self.assertTrue(np.allclose(out_res, expect_out))
out_res, = exe.run(paddle.static.default_main_program(),
feed={
'x': self.x,
'boxes': boxes_lod_data
},
fetch_list=[out.name])
np.testing.assert_allclose(out_res, expect_out, rtol=1e-05)
if __name__ == '__main__':
......
......@@ -63,9 +63,10 @@ class TestQueue(unittest.TestCase):
0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
ret = exe.run(main_program, fetch_list=[data_out.name])
self.assertTrue(
np.allclose(np.asarray(ret), np.full((2, 3), value, np.float32)))
ret, = exe.run(main_program, fetch_list=[data_out.name])
np.testing.assert_allclose(np.asarray(ret),
np.full((2, 3), value, np.float32),
rtol=1e-05)
if __name__ == '__main__':
......
......@@ -157,16 +157,20 @@ class TestReorderLoDTensor(unittest.TestCase):
# check output
expect_output, expect_output_lod = self.reorder()
for actual_output in self.actual_outputs:
self.assertTrue(
np.allclose(np.array(actual_output), expect_output, atol=0.001))
np.testing.assert_allclose(np.array(actual_output),
expect_output,
rtol=1e-05,
atol=0.001)
self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths())
# check gradient
expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1]
for actual_grad in self.actual_grads:
self.assertTrue(
np.allclose(np.array(actual_grad), expect_grad, atol=0.001))
np.testing.assert_allclose(np.array(actual_grad),
expect_grad,
rtol=1e-05,
atol=0.001)
self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths())
......@@ -177,16 +181,20 @@ class TestReorderLoDTensor(unittest.TestCase):
# check output
expect_output, expect_output_lod = self.reorder()
for actual_output in self.actual_outputs:
self.assertTrue(
np.allclose(np.array(actual_output), expect_output, atol=0.001))
np.testing.assert_allclose(np.array(actual_output),
expect_output,
rtol=1e-05,
atol=0.001)
self.assertEqual(expect_output_lod,
actual_output.recursive_sequence_lengths())
# check gradient
expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0])
expect_grad_lod = self.data[self.data_desc[0][0]][1]
for actual_grad in self.actual_grads:
self.assertTrue(
np.allclose(np.array(actual_grad), expect_grad, atol=0.001))
np.testing.assert_allclose(np.array(actual_grad),
expect_grad,
rtol=1e-05,
atol=0.001)
self.assertEqual(expect_grad_lod,
actual_grad.recursive_sequence_lengths())
......@@ -196,13 +204,16 @@ class TestReorderLoDTensor(unittest.TestCase):
self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths(
input_lod)
# preserve the output of LodTensor with implicit lod to compare
expect_output = [
expect_outputs = [
np.array(actual_output) for actual_output in self.actual_outputs
]
self.run_program()
for actual_output in self.actual_outputs:
self.assertTrue(
np.allclose(np.array(actual_output), expect_output, atol=0.001))
for actual_output, expect_output in zip(self.actual_outputs,
expect_outputs):
np.testing.assert_allclose(np.array(actual_output),
expect_output,
rtol=1e-05,
atol=0.001)
class TestReorderLoDTensorError(unittest.TestCase):
......
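The last `reorder_lod_tensor` hunk fixes a comparison of each item against the whole list: the snapshots saved before re-running the program form a list of arrays, so the loop now pairs them off with `zip` instead of comparing every `actual_output` to all of `expect_outputs` at once. Roughly:

import numpy as np

expect_outputs = [np.zeros((2, 2)), np.ones((2, 2))]   # snapshots taken before re-running
actual_outputs = [np.zeros((2, 2)), np.ones((2, 2))]

for actual_output, expect_output in zip(actual_outputs, expect_outputs):
    np.testing.assert_allclose(np.array(actual_output),
                               expect_output,
                               rtol=1e-05,
                               atol=0.001)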
......@@ -78,13 +78,13 @@ class TestFunctionalRReluAPI(unittest.TestCase):
feed={"input": in_np},
fetch_list=[res1])
self.assertTrue(np.allclose(fetches[0], res_np1))
np.testing.assert_allclose(fetches[0], res_np1, rtol=1e-05)
res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1)
fetches = exe.run(fluid.default_main_program(),
feed={"input": in_np},
fetch_list=[res2])
self.assertTrue(np.allclose(fetches[0], res_np2))
np.testing.assert_allclose(fetches[0], res_np2, rtol=1e-05)
def test_static(self):
for place in self.places:
......@@ -106,23 +106,23 @@ class TestFunctionalRReluAPI(unittest.TestCase):
out_3 = F.rrelu(x_2, self.lower_1, self.upper_1, training=True)
exe = paddle.static.Executor(place=place)
res_1 = exe.run(fluid.default_main_program(),
feed={"x": self.x_np},
fetch_list=out_1,
use_prune=True)
res_2 = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np},
fetch_list=out_2,
use_prune=True)
res_3 = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np},
fetch_list=out_3,
use_prune=True)
res_1, = exe.run(fluid.default_main_program(),
feed={"x": self.x_np},
fetch_list=out_1,
use_prune=True)
res_2, = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np},
fetch_list=out_2,
use_prune=True)
res_3, = exe.run(fluid.default_main_program(),
feed={"x2": self.x_np},
fetch_list=out_3,
use_prune=True)
out_ref_1 = ref_rrelu(self.x_np, self.lower_0, self.upper_0)
out_ref_2 = ref_rrelu(self.x_np, self.lower_1, self.upper_1)
self.assertEqual(np.allclose(out_ref_1, res_1), True)
self.assertEqual(np.allclose(out_ref_2, res_2), True)
np.testing.assert_allclose(out_ref_1, res_1, rtol=1e-05)
np.testing.assert_allclose(out_ref_2, res_2, rtol=1e-05)
self.assertTrue(
check_output(self.x_np, res_3[0], self.lower_1, self.upper_1))
......@@ -164,7 +164,7 @@ class TestFunctionalRReluAPI(unittest.TestCase):
x = paddle.to_tensor(self.x_np)
out = F.rrelu(x, lower, upper, training=False)
out_ref = ref_rrelu(self.x_np, lower, upper)
self.assertEqual(np.allclose(out_ref, out), True)
np.testing.assert_allclose(out_ref, out, rtol=1e-05)
paddle.enable_static()
def test_dygraph_functional(self):
......
......@@ -119,13 +119,13 @@ class TestSearchSortedAPI(unittest.TestCase):
dtype="float64")
out = paddle.searchsorted(sorted_sequence, values)
exe = paddle.static.Executor(place)
res = exe.run(feed={
res, = exe.run(feed={
'SortedSequence': self.sorted_sequence,
'Values': self.values
},
fetch_list=out)
fetch_list=out)
out_ref = np.searchsorted(self.sorted_sequence, self.values)
self.assertTrue(np.allclose(out_ref, res))
np.testing.assert_allclose(out_ref, res, rtol=1e-05)
for place in self.place:
run(place)
......@@ -141,7 +141,7 @@ class TestSearchSortedAPI(unittest.TestCase):
out_ref = np.searchsorted(self.sorted_sequence,
self.values,
side='right')
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
for place in self.place:
......
......@@ -69,7 +69,7 @@ class TestSignAPI(unittest.TestCase):
z = paddle.sgn(x)
np_z = z.numpy()
z_expected = np_sgn(np_x)
self.assertTrue(np.allclose(np_z, z_expected))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
def test_float(self):
for dtype in self.support_dtypes:
......@@ -78,7 +78,7 @@ class TestSignAPI(unittest.TestCase):
z = paddle.sgn(x)
np_z = z.numpy()
z_expected = np_sgn(np_x)
self.assertTrue(np.allclose(np_z, z_expected))
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
if __name__ == "__main__":
......
......@@ -142,9 +142,9 @@ class TestSigmoidFocalLoss(unittest.TestCase):
for alpha in alphas:
for gamma in gammas:
for normalizer_np in normalizer_nps:
static_result = test_static(place, logit_np,
label_np, normalizer_np,
alpha, gamma, reduction)
static_result, = test_static(
place, logit_np, label_np, normalizer_np, alpha,
gamma, reduction)
dy_result = test_dygraph(place, logit_np, label_np,
normalizer_np, alpha,
gamma, reduction)
......@@ -155,12 +155,18 @@ class TestSigmoidFocalLoss(unittest.TestCase):
expected = calc_sigmoid_focal_loss(
logit_np, label_np, normalizer_np, alpha, gamma,
reduction)
self.assertTrue(np.allclose(static_result,
expected))
self.assertTrue(
np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
self.assertTrue(np.allclose(eager_result, expected))
np.testing.assert_allclose(static_result,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_result,
dy_result,
rtol=1e-05)
np.testing.assert_allclose(dy_result,
expected,
rtol=1e-05)
np.testing.assert_allclose(eager_result,
expected,
rtol=1e-05)
def test_SigmoidFocalLoss_error(self):
paddle.disable_static()
......
......@@ -58,12 +58,12 @@ class SmoothL1Loss(unittest.TestCase):
ret = smooth_l1_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
static_ret, = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
smooth_l1_loss = paddle.nn.loss.SmoothL1Loss()
......@@ -72,9 +72,9 @@ class SmoothL1Loss(unittest.TestCase):
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = smooth_l1_loss_np(input_np, label_np, reduction='mean')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
def test_smooth_l1_loss_sum(self):
input_np = np.random.random([100, 200]).astype(np.float32)
......@@ -90,12 +90,12 @@ class SmoothL1Loss(unittest.TestCase):
ret = smooth_l1_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
static_ret, = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum')
......@@ -104,9 +104,9 @@ class SmoothL1Loss(unittest.TestCase):
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = smooth_l1_loss_np(input_np, label_np, reduction='sum')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
def test_smooth_l1_loss_none(self):
input_np = np.random.random([100, 200]).astype(np.float32)
......@@ -122,12 +122,12 @@ class SmoothL1Loss(unittest.TestCase):
ret = smooth_l1_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
static_ret, = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none')
......@@ -136,9 +136,9 @@ class SmoothL1Loss(unittest.TestCase):
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = smooth_l1_loss_np(input_np, label_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
def test_smooth_l1_loss_delta(self):
input_np = np.random.random([100, 200]).astype(np.float32)
......@@ -155,12 +155,12 @@ class SmoothL1Loss(unittest.TestCase):
ret = smooth_l1_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
static_ret, = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta)
......@@ -169,9 +169,9 @@ class SmoothL1Loss(unittest.TestCase):
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = smooth_l1_loss_np(input_np, label_np, delta=delta)
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
np.testing.assert_allclose(static_ret, expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
if __name__ == "__main__":
......
......@@ -36,12 +36,12 @@ def test_static_layer(
sm_loss = paddle.nn.loss.SoftMarginLoss(reduction=reduction)
res = sm_loss(input, label)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[res])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[res])
return static_result
......@@ -66,12 +66,12 @@ def test_static_functional(
label,
reduction=reduction)
exe = paddle.static.Executor(place)
static_result = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[res])
static_result, = exe.run(prog,
feed={
"input": input_np,
"label": label_np
},
fetch_list=[res])
return static_result
......@@ -146,17 +146,26 @@ class TestSoftMarginLoss(unittest.TestCase):
reduction)
expected = calc_softmarginloss(input_np, label_np,
reduction)
self.assertTrue(np.allclose(static_result, expected))
self.assertTrue(np.allclose(static_result, dy_result))
self.assertTrue(np.allclose(dy_result, expected))
np.testing.assert_allclose(static_result,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_result,
dy_result,
rtol=1e-05)
np.testing.assert_allclose(dy_result, expected, rtol=1e-05)
static_functional = test_static_functional(
place, input_np, label_np, reduction)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction)
self.assertTrue(np.allclose(static_functional, expected))
self.assertTrue(
np.allclose(static_functional, dy_functional))
self.assertTrue(np.allclose(dy_functional, expected))
np.testing.assert_allclose(static_functional,
expected,
rtol=1e-05)
np.testing.assert_allclose(static_functional,
dy_functional,
rtol=1e-05)
np.testing.assert_allclose(dy_functional,
expected,
rtol=1e-05)
def test_SoftMarginLoss_error(self):
paddle.disable_static()
......
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_softmax_op import ref_softmax
class TestSoftmax2DAPI(unittest.TestCase):
def setUp(self):
self.shape = [2, 6, 5, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
m = paddle.nn.Softmax2D()
out = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_softmax(self.x_np, self.axis)
self.assertTrue(np.allclose(out_ref, res))
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.Softmax2D()
out = m(x)
out_ref = ref_softmax(self.x_np, self.axis)
self.assertTrue(np.allclose(out_ref, out.numpy()))
paddle.enable_static()
class TestSoftmax2DShape(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 6, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
class TestSoftmax2DFloat32(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 3, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float32')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
class TestSoftmax2DCPU(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 6, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CPUPlace()
class TestSoftmax2DRepr(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_extra_repr(self):
paddle.disable_static(self.place)
m = paddle.nn.Softmax2D(name='test')
self.assertTrue(m.extra_repr() == 'name=test')
paddle.enable_static()
class TestSoftmax2DError(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_error(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [5, 5], 'float32')
m = paddle.nn.Softmax2D()
self.assertRaises(AssertionError, m, x)
def test_dygraph_error(self):
paddle.disable_static(self.place)
x_np = np.random.randn(2, 3, 4, 2, 3)
x = paddle.to_tensor(x_np, dtype='float64')
m = paddle.nn.Softmax2D()
self.assertRaises(AssertionError, m, x)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_softmax_op import ref_softmax
class TestSoftmax2DAPI(unittest.TestCase):
def setUp(self):
self.shape = [2, 6, 5, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_api(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
m = paddle.nn.Softmax2D()
out = m(x)
exe = paddle.static.Executor(self.place)
res, = exe.run(feed={'X': self.x_np}, fetch_list=[out])
out_ref = ref_softmax(self.x_np, self.axis)
np.testing.assert_allclose(out_ref, res, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.Softmax2D()
out = m(x)
out_ref = ref_softmax(self.x_np, self.axis)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
class TestSoftmax2DShape(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 6, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
class TestSoftmax2DFloat32(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 3, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float32')
self.axis = -3
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
class TestSoftmax2DCPU(TestSoftmax2DAPI):
def setUp(self):
self.shape = [2, 6, 4]
self.x_np = np.random.uniform(-1, 1, self.shape).astype('float64')
self.axis = -3
self.place = paddle.CPUPlace()
class TestSoftmax2DRepr(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_extra_repr(self):
paddle.disable_static(self.place)
m = paddle.nn.Softmax2D(name='test')
self.assertTrue(m.extra_repr() == 'name=test')
paddle.enable_static()
class TestSoftmax2DError(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_static_error(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [5, 5], 'float32')
m = paddle.nn.Softmax2D()
self.assertRaises(AssertionError, m, x)
def test_dygraph_error(self):
paddle.disable_static(self.place)
x_np = np.random.randn(2, 3, 4, 2, 3)
x = paddle.to_tensor(x_np, dtype='float64')
m = paddle.nn.Softmax2D()
self.assertRaises(AssertionError, m, x)
if __name__ == '__main__':
unittest.main()
......@@ -41,14 +41,14 @@ class TestSquareErrorCost(unittest.TestCase):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
result = exe.run(fluid.default_main_program(),
feed={
"input": input_val,
"label": label_val
},
fetch_list=[output])
self.assertTrue(np.isclose(np_result, result).all())
result, = exe.run(fluid.default_main_program(),
feed={
"input": input_val,
"label": label_val
},
fetch_list=[output])
np.testing.assert_allclose(np_result, result, rtol=1e-05)
class TestSquareErrorInvalidInput(unittest.TestCase):
......
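`square_error_cost` is the one spot that used `np.isclose(...).all()` rather than `np.allclose`; the two are interchangeable here, since `np.allclose(a, b)` is documented as `np.isclose(a, b).all()` under the same default tolerances, so it converts to the same `assert_allclose` call. For reference:

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0, 2.0 + 1e-09])

assert np.allclose(a, b) == np.isclose(a, b).all()   # same defaults, same verdict
np.testing.assert_allclose(a, b, rtol=1e-05)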
......@@ -71,9 +71,9 @@ class TestTrilIndicesAPICaseStatic(unittest.TestCase):
paddle.static.Program()):
data1 = paddle.tril_indices(4, 4, -1)
exe1 = paddle.static.Executor(place)
result1 = exe1.run(feed={}, fetch_list=[data1])
result1, = exe1.run(feed={}, fetch_list=[data1])
expected_result1 = np.tril_indices(4, -1, 4)
self.assertTrue(np.allclose(result1, expected_result1))
np.testing.assert_allclose(result1, expected_result1, rtol=1e-05)
class TestTrilIndicesAPICaseDygraph(unittest.TestCase):
......@@ -121,9 +121,9 @@ class TestTrilIndicesAPICaseDefault(unittest.TestCase):
paddle.static.Program()):
data = paddle.tril_indices(4, None, 2)
exe = paddle.static.Executor(paddle.CPUPlace())
result = exe.run(feed={}, fetch_list=[data])
result, = exe.run(feed={}, fetch_list=[data])
expected_result = np.tril_indices(4, 2)
self.assertTrue(np.allclose(result, expected_result))
np.testing.assert_allclose(result, expected_result, rtol=1e-05)
with fluid.dygraph.base.guard(paddle.CPUPlace()):
out = paddle.tril_indices(4, None, 2)
......
......@@ -264,7 +264,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
output = self.unsqueeze(input, axis=[1])
out_np = output.numpy()
expected_out = np.expand_dims(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
def test_out_int8(self):
paddle.disable_static()
......@@ -273,7 +273,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
output = self.unsqueeze(input, axis=[1])
out_np = output.numpy()
expected_out = np.expand_dims(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
def test_out_uint8(self):
paddle.disable_static()
......@@ -282,7 +282,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
output = self.unsqueeze(input, axis=1)
out_np = output.numpy()
expected_out = np.expand_dims(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
def test_axis_not_list(self):
paddle.disable_static()
......@@ -291,7 +291,7 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
output = self.unsqueeze(input, axis=1)
out_np = output.numpy()
expected_out = np.expand_dims(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
def test_dimension_not_1(self):
paddle.disable_static()
......@@ -299,8 +299,8 @@ class API_TestDygraphUnSqueeze(unittest.TestCase):
input = paddle.to_tensor(input_1)
output = self.unsqueeze(input, axis=(1, 2))
out_np = output.numpy()
expected_out = np.expand_dims(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np))
expected_out = np.expand_dims(input_1, axis=(1, 2))
np.testing.assert_allclose(expected_out, out_np, rtol=1e-05)
class API_TestDygraphUnSqueezeInplace(API_TestDygraphUnSqueeze):
......
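The final unsqueeze hunk corrects the numpy reference itself: with `axis=(1, 2)` two axes are inserted, and `np.expand_dims` has accepted a tuple of axes since NumPy 1.18, so the reference can mirror the paddle call directly. For example:

import numpy as np

x = np.random.rand(5, 7)
y = np.expand_dims(x, axis=(1, 2))   # tuple axes require NumPy >= 1.18
print(y.shape)                       # (5, 1, 1, 7)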