From 93c5c8878eaf26e443270b755585be303c3c7b59 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 10 Aug 2022 11:53:34 +0800 Subject: [PATCH] [CodeStyle] use np.testing.assert_array_equal instead of self.assertTrue(np.array_equal(...)) (#44947) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * automatically fix * update comments * numpy -> np * self.assertEqual(..., True) * wrong usage (err_msg=True) These are not errors introduced by this fix; they are pre-existing misuses of `self.assertTrue(..., True)`, so after the conversion the stray trailing argument is treated as the positional parameter `err_msg`. * some missing fix A minimal before/after sketch of this rewrite is appended after the diff. --- .../custom_kernel/test_custom_kernel_dot.py | 14 +- .../custom_kernel/test_custom_kernel_load.py | 7 +- .../tests/custom_op/test_context_pool.py | 2 +- .../tests/custom_op/test_custom_attrs_jit.py | 4 +- .../tests/custom_op/test_custom_concat.py | 7 +- .../fluid/tests/custom_op/test_custom_conj.py | 7 +- .../tests/custom_op/test_custom_linear.py | 7 +- .../custom_op/test_custom_raw_op_kernel_op.py | 2 +- .../tests/custom_op/test_custom_relu_model.py | 36 +- .../custom_op/test_custom_relu_op_jit.py | 27 +- .../custom_op/test_custom_relu_op_setup.py | 68 +-- .../custom_op/test_custom_simple_slice.py | 8 +- .../tests/custom_op/test_dispatch_jit.py | 7 +- .../tests/custom_op/test_multi_out_jit.py | 10 +- .../custom_runtime/test_custom_cpu_plugin.py | 10 +- python/paddle/fluid/tests/test_detection.py | 12 +- python/paddle/fluid/tests/test_lod_tensor.py | 22 +- .../test_convert_operators.py | 2 +- .../dygraph_to_static/test_deepcopy.py | 4 +- .../dygraph_to_static/test_partial_program.py | 2 +- .../dygraph_to_static/test_rollback.py | 8 +- .../unittests/dygraph_to_static/test_slice.py | 25 +- .../test_standalone_controlflow.py | 4 +- .../interpreter/test_standalone_executor.py | 6 +- .../tests/unittests/mlu/test_scale_op_mlu.py | 4 +- .../unittests/mlu/test_transpose_op_mlu.py | 8 +- .../paddle/fluid/tests/unittests/op_test.py | 13 +- .../unittests/test_activation_sparse_op.py | 2 +- .../fluid/tests/unittests/test_assign_op.py | 8 +- .../tests/unittests/test_assign_value_op.py | 24 +- .../fluid/tests/unittests/test_base_layer.py | 6 +- .../unittests/test_beam_search_decode_op.py | 5 +- .../tests/unittests/test_bernoulli_op.py | 4 +- .../test_buffer_shared_memory_reuse_pass.py | 23 +- .../tests/unittests/test_calc_gradient.py | 2 +- .../fluid/tests/unittests/test_cast_op.py | 7 +- .../tests/unittests/test_compiled_program.py | 4 +- .../fluid/tests/unittests/test_concat_op.py | 6 +- .../fluid/tests/unittests/test_conj_op.py | 8 +- .../fluid/tests/unittests/test_cuda_graph.py | 8 +- .../test_cuda_graph_partial_graph.py | 6 +- .../tests/unittests/test_cuda_stream_event.py | 2 +- .../fluid/tests/unittests/test_cumsum_op.py | 8 +- .../test_decoupled_py_reader_data_check.py | 4 +- .../fluid/tests/unittests/test_dot_op.py | 5 +- .../fluid/tests/unittests/test_dropout_op.py | 14 +- .../test_dynamic_rnn_stop_gradient.py | 2 +- .../tests/unittests/test_eager_run_program.py | 10 +- .../unittests/test_egr_code_generate_api.py | 4 +- .../tests/unittests/test_egr_python_api.py | 125 +++-- .../unittests/test_egr_string_tensor_api.py | 22 +- .../unittests/test_elementwise_min_op.py | 14 +- .../unittests/test_elementwise_pow_op.py | 6 +- .../test_embedding_id_stop_gradient.py | 2 +- .../test_executor_check_fetch_list.py | 2 +- ..._executor_return_tensor_not_overwriting.py | 4 +- .../fluid/tests/unittests/test_expand_op.py | 6 +- .../tests/unittests/test_expand_v2_op.py | 10 +- .../tests/unittests/test_exponential_op.py | 2 +- .../fluid/tests/unittests/test_fc_op.py | 2 +- 
.../fluid/tests/unittests/test_fetch_var.py | 12 +- .../fluid/tests/unittests/test_fill_any_op.py | 2 +- .../tests/unittests/test_fill_constant_op.py | 2 +- .../fluid/tests/unittests/test_fill_op.py | 2 +- .../tests/unittests/test_fused_matmul_bias.py | 10 +- .../fluid/tests/unittests/test_gather_op.py | 2 +- .../test_imperative_auto_mixed_precision.py | 6 +- ...perative_auto_mixed_precision_for_eager.py | 6 +- .../unittests/test_imperative_auto_prune.py | 11 +- .../tests/unittests/test_imperative_basic.py | 86 ++-- .../test_imperative_data_parallel.py | 6 +- .../unittests/test_imperative_double_grad.py | 43 +- .../test_imperative_hook_for_layer.py | 18 +- .../test_imperative_load_static_param.py | 2 +- ..._imperative_lod_tensor_to_selected_rows.py | 5 +- .../unittests/test_imperative_numpy_bridge.py | 11 +- .../test_imperative_ocr_attention_model.py | 4 +- .../unittests/test_imperative_ptb_rnn.py | 14 +- ...test_imperative_ptb_rnn_sorted_gradient.py | 14 +- .../test_imperative_recurrent_usage.py | 8 +- .../unittests/test_imperative_save_load.py | 66 ++- .../unittests/test_imperative_save_load_v2.py | 67 ++- ..._imperative_selected_rows_to_lod_tensor.py | 8 +- .../test_imperative_static_runner_mnist.py | 10 +- .../test_imperative_static_runner_while.py | 2 +- ...imperative_trace_non_persistable_inputs.py | 2 +- ..._imperative_transformer_sorted_gradient.py | 22 +- .../unittests/test_imperative_triple_grad.py | 25 +- .../test_imperative_using_non_zero_gpu.py | 2 +- .../fluid/tests/unittests/test_initializer.py | 42 +- .../fluid/tests/unittests/test_inplace.py | 6 +- .../unittests/test_inplace_addto_strategy.py | 2 +- .../test_inplace_auto_generated_apis.py | 2 +- .../tests/unittests/test_jit_save_load.py | 60 +-- .../fluid/tests/unittests/test_lambv2_op.py | 9 +- .../tests/unittests/test_layer_norm_op.py | 2 +- .../fluid/tests/unittests/test_layers.py | 375 +++++++------- .../fluid/tests/unittests/test_load_op.py | 2 +- .../fluid/tests/unittests/test_load_op_xpu.py | 2 +- .../test_load_state_dict_from_old_format.py | 2 +- .../tests/unittests/test_lod_tensor_array.py | 16 +- .../unittests/test_lookup_table_bf16_op.py | 4 +- .../unittests/test_lookup_table_v2_bf16_op.py | 4 +- .../tests/unittests/test_math_op_patch.py | 104 ++-- .../unittests/test_math_op_patch_var_base.py | 421 +++++++--------- .../tests/unittests/test_matmul_v2_op.py | 2 +- .../fluid/tests/unittests/test_mean_op.py | 4 +- .../fluid/tests/unittests/test_memcpy_op.py | 4 +- .../test_memory_reuse_exclude_feed_var.py | 2 +- .../tests/unittests/test_merged_adam_op.py | 2 +- .../unittests/test_merged_momentum_op.py | 6 +- .../tests/unittests/test_mixed_precision.py | 6 +- .../tests/unittests/test_multinomial_op.py | 10 +- .../unittests/test_op_function_generator.py | 13 +- .../fluid/tests/unittests/test_ops_nms.py | 28 +- .../fluid/tests/unittests/test_optimizer.py | 3 +- .../test_paddle_imperative_double_grad.py | 10 +- .../tests/unittests/test_paddle_save_load.py | 256 +++++----- .../unittests/test_paddle_save_load_binary.py | 15 +- ...st_parallel_executor_fetch_isolated_var.py | 2 +- ...el_executor_inference_feed_partial_data.py | 6 +- .../fluid/tests/unittests/test_parameter.py | 16 +- .../fluid/tests/unittests/test_poisson_op.py | 22 +- .../fluid/tests/unittests/test_prune.py | 26 +- .../unittests/test_py_reader_combination.py | 4 +- .../fluid/tests/unittests/test_randint_op.py | 12 +- .../fluid/tests/unittests/test_randperm_op.py | 24 +- .../tests/unittests/test_real_imag_op.py | 6 +- 
.../fluid/tests/unittests/test_reverse_op.py | 4 +- .../fluid/tests/unittests/test_scale_op.py | 4 +- .../tests/unittests/test_scatter_nd_op.py | 5 +- .../fluid/tests/unittests/test_scatter_op.py | 2 +- .../tests/unittests/test_set_value_op.py | 76 +-- .../tests/unittests/test_shuffle_batch_op.py | 2 +- .../fluid/tests/unittests/test_slice_op.py | 44 +- .../fluid/tests/unittests/test_split_op.py | 2 +- .../tests/unittests/test_split_program.py | 6 +- .../unittests/test_squared_l2_norm_op.py | 2 +- .../fluid/tests/unittests/test_stack_op.py | 10 +- .../tests/unittests/test_static_save_load.py | 30 +- .../unittests/test_static_save_load_bf16.py | 2 +- .../unittests/test_static_save_load_large.py | 4 +- .../tests/unittests/test_strided_slice_op.py | 9 +- .../fluid/tests/unittests/test_sum_op.py | 19 +- .../fluid/tests/unittests/test_tensor.py | 104 ++-- .../unittests/test_tensor_array_to_tensor.py | 68 ++- .../tests/unittests/test_tensor_copy_from.py | 6 +- .../unittests/test_tensor_register_hook.py | 97 ++-- ...st_tensor_scalar_type_promotion_dynamic.py | 2 +- ...est_tensor_scalar_type_promotion_static.py | 2 +- .../tests/unittests/test_translated_layer.py | 10 +- .../tests/unittests/test_transpose_op.py | 8 +- .../fluid/tests/unittests/test_unbind_op.py | 460 +++++++++--------- .../tests/unittests/test_unsqueeze_op.py | 10 +- .../fluid/tests/unittests/test_var_base.py | 313 ++++++------ .../fluid/tests/unittests/test_variable.py | 146 +++--- .../test_view_op_reuse_allocation.py | 2 +- .../fluid/tests/unittests/test_where_op.py | 2 +- .../tests/unittests/test_while_loop_op.py | 2 +- .../tests/unittests/xpu/test_scale_op_xpu.py | 4 +- python/paddle/tests/test_dlpack.py | 17 +- python/paddle/tests/test_hapi_amp.py | 9 +- 162 files changed, 2026 insertions(+), 2140 deletions(-) diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py index 130f74c06d5..55b3fad1f35 100644 --- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py +++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_dot.py @@ -43,9 +43,10 @@ class TestCustomKernelDot(unittest.TestCase): y = paddle.to_tensor(y_data) out = paddle.dot(x, y) - self.assertTrue( - np.array_equal(out.numpy(), result), - "custom kernel dot out: {},\n numpy dot out: {}".format( + np.testing.assert_array_equal( + out.numpy(), + result, + err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( out.numpy(), result)) @@ -72,9 +73,10 @@ class TestCustomKernelDotC(unittest.TestCase): y = paddle.to_tensor(y_data) out = paddle.dot(x, y) - self.assertTrue( - np.array_equal(out.numpy(), result), - "custom kernel dot out: {},\n numpy dot out: {}".format( + np.testing.assert_array_equal( + out.numpy(), + result, + err_msg='custom kernel dot out: {},\n numpy dot out: {}'.format( out.numpy(), result)) diff --git a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py index a4def8df9e0..4ca05909fb1 100644 --- a/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py +++ b/python/paddle/fluid/tests/custom_kernel/test_custom_kernel_load.py @@ -65,9 +65,10 @@ class TestCustomKernelLoad(unittest.TestCase): y = paddle.to_tensor(y_data) out = paddle.dot(x, y) - self.assertTrue( - np.array_equal(out.numpy(), result), - "custom kernel dot out: {},\n numpy dot out: {}".format( + np.testing.assert_array_equal( + out.numpy(), + result, + err_msg='custom kernel dot 
out: {},\n numpy dot out: {}'.format( out.numpy(), result)) def tearDown(self): diff --git a/python/paddle/fluid/tests/custom_op/test_context_pool.py b/python/paddle/fluid/tests/custom_op/test_context_pool.py index d4a079ee4fe..69b8b18559e 100644 --- a/python/paddle/fluid/tests/custom_op/test_context_pool.py +++ b/python/paddle/fluid/tests/custom_op/test_context_pool.py @@ -51,7 +51,7 @@ class TestContextPool(unittest.TestCase): x = paddle.ones([2, 2], dtype='float32') out = custom_ops.context_pool_test(x) - self.assertTrue(np.array_equal(x.numpy(), out.numpy())) + np.testing.assert_array_equal(x.numpy(), out.numpy()) def test_using_context_pool(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py b/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py index 953ca551906..a0be75c0a41 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py @@ -66,7 +66,7 @@ class TestJitCustomAttrs(unittest.TestCase): out.stop_gradient = False out.backward() - self.assertTrue(np.array_equal(x.numpy(), out.numpy())) + np.testing.assert_array_equal(x.numpy(), out.numpy()) def test_attr_value(self): with _test_eager_guard(): @@ -85,7 +85,7 @@ class TestJitCustomAttrs(unittest.TestCase): out.stop_gradient = False out.backward() - self.assertTrue(np.array_equal(x.numpy(), out.numpy())) + np.testing.assert_array_equal(x.numpy(), out.numpy()) def test_const_attr_value(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_concat.py b/python/paddle/fluid/tests/custom_op/test_custom_concat.py index 83be96a95a8..ae3022411b1 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_concat.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_concat.py @@ -112,9 +112,10 @@ class TestCustomConcatDynamicAxisJit(unittest.TestCase): self.axises = [0, 1] def check_output(self, out, pd_out, name): - self.assertTrue( - np.array_equal(out, pd_out), - "custom op {}: {},\n paddle api {}: {}".format( + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op {}: {},\n paddle api {}: {}'.format( name, out, name, pd_out)) def func_dynamic(self): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_conj.py b/python/paddle/fluid/tests/custom_op/test_custom_conj.py index ea916ff55ec..a389a72df73 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_conj.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_conj.py @@ -97,9 +97,10 @@ class TestCustomConjJit(unittest.TestCase): self.shape = [2, 20, 2, 3] def check_output(self, out, pd_out, name): - self.assertTrue( - np.array_equal(out, pd_out), - "custom op {}: {},\n paddle api {}: {}".format( + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op {}: {},\n paddle api {}: {}'.format( name, out, name, pd_out)) def run_dynamic(self, dtype, np_input): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_linear.py b/python/paddle/fluid/tests/custom_op/test_custom_linear.py index 2309751659a..3ae650ee949 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_linear.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_linear.py @@ -97,9 +97,10 @@ class TestCustomLinearJit(unittest.TestCase): self.np_bias = np.ones([4], dtype="float32") def check_output(self, out, pd_out, name): - self.assertTrue( - np.array_equal(out, pd_out), - "custom op {}: {},\n paddle api {}: {}".format( + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom 
op {}: {},\n paddle api {}: {}'.format( name, out, name, pd_out)) def test_static(self): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py index f95f57b4b7a..3cd550c95f0 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py @@ -83,7 +83,7 @@ class TestCustomRawReluOp(unittest.TestCase): y1_value, y2_value = exe.run(paddle.static.default_main_program(), feed={x.name: x_np}, fetch_list=[y1, y2]) - self.assertTrue(np.array_equal(y1_value, y2_value)) + np.testing.assert_array_equal(y1_value, y2_value) paddle.disable_static() diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py index ff0b11128a4..18f3252e6e1 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_model.py @@ -121,12 +121,11 @@ class TestDygraphModel(unittest.TestCase): if _in_legacy_dygraph(): custom_relu_dy2stat_train_out = self.train_model( use_custom_op=True, dy2stat=True) # for to_static - self.assertTrue( - np.array_equal(origin_relu_train_out, - custom_relu_dy2stat_train_out)) + np.testing.assert_array_equal(origin_relu_train_out, + custom_relu_dy2stat_train_out) - self.assertTrue( - np.array_equal(origin_relu_train_out, custom_relu_train_out)) + np.testing.assert_array_equal(origin_relu_train_out, + custom_relu_train_out) # for eval origin_relu_eval_out = self.eval_model(use_custom_op=False) @@ -134,12 +133,11 @@ class TestDygraphModel(unittest.TestCase): if _in_legacy_dygraph(): custom_relu_dy2stat_eval_out = self.eval_model( use_custom_op=True, dy2stat=True) # for to_static - self.assertTrue( - np.array_equal(origin_relu_eval_out, - custom_relu_dy2stat_eval_out)) + np.testing.assert_array_equal(origin_relu_eval_out, + custom_relu_dy2stat_eval_out) - self.assertTrue( - np.array_equal(origin_relu_eval_out, custom_relu_eval_out)) + np.testing.assert_array_equal(origin_relu_eval_out, + custom_relu_eval_out) def test_train_eval(self): with _test_eager_guard(): @@ -243,11 +241,10 @@ class TestStaticModel(unittest.TestCase): use_custom_op=True, use_pe=True) - self.assertTrue( - np.array_equal(original_relu_train_out, custom_relu_train_out)) - self.assertTrue( - np.array_equal(original_relu_train_pe_out, - custom_relu_train_pe_out)) + np.testing.assert_array_equal(original_relu_train_out, + custom_relu_train_out) + np.testing.assert_array_equal(original_relu_train_pe_out, + custom_relu_train_pe_out) # for eval original_relu_eval_out = self.eval_model(device, @@ -261,11 +258,10 @@ class TestStaticModel(unittest.TestCase): use_custom_op=True, use_pe=True) - self.assertTrue( - np.array_equal(original_relu_eval_out, custom_relu_eval_out)) - self.assertTrue( - np.array_equal(original_relu_eval_pe_out, - custom_relu_eval_pe_out)) + np.testing.assert_array_equal(original_relu_eval_out, + custom_relu_eval_out) + np.testing.assert_array_equal(original_relu_eval_pe_out, + custom_relu_eval_pe_out) def train_model(self, device, use_custom_op=False, use_pe=False): # reset random seed diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py index 5052a0989bb..f01a737a3b1 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py +++ 
b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py @@ -71,10 +71,11 @@ class TestJITLoad(unittest.TestCase): out = custom_relu_static(custom_op, device, dtype, x) pd_out = custom_relu_static(custom_op, device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( - out, pd_out)) + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'. + format(out, pd_out)) def func_dynamic(self): for device in self.devices: @@ -87,14 +88,16 @@ class TestJITLoad(unittest.TestCase): x) pd_out, pd_x_grad = custom_relu_dynamic( custom_op, device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( - out, pd_out)) - self.assertTrue( - np.array_equal(x_grad, pd_x_grad), - "custom op x grad: {},\n paddle api x grad: {}".format( - x_grad, pd_x_grad)) + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'. + format(out, pd_out)) + np.testing.assert_array_equal( + x_grad, + pd_x_grad, + err_msg='custom op x grad: {},\n paddle api x grad: {}'. + format(x_grad, pd_x_grad)) def test_dynamic(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py index 1a53bf3354f..0cc1b19e654 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py @@ -224,10 +224,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): out = custom_relu_static(custom_op, device, dtype, x) pd_out = custom_relu_static(custom_op, device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( - out, pd_out)) + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'. + format(out, pd_out)) def test_static_pe(self): for device in self.devices: @@ -239,10 +240,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): out = custom_relu_static_pe(custom_op, device, dtype, x) pd_out = custom_relu_static_pe(custom_op, device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( - out, pd_out)) + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'. + format(out, pd_out)) def func_dynamic(self): for device in self.devices: @@ -255,14 +257,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): x) pd_out, pd_x_grad = custom_relu_dynamic( custom_op, device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( - out, pd_out)) - self.assertTrue( - np.array_equal(x_grad, pd_x_grad), - "custom op x grad: {},\n paddle api x grad: {}".format( - x_grad, pd_x_grad)) + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'. + format(out, pd_out)) + np.testing.assert_array_equal( + x_grad, + pd_x_grad, + err_msg='custom op x grad: {},\n paddle api x grad: {}'. 
+ format(x_grad, pd_x_grad)) def test_dynamic(self): with _test_eager_guard(): @@ -286,10 +290,11 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): predict_infer = exe.run(inference_program, feed={feed_target_names[0]: np_data}, fetch_list=fetch_targets) - self.assertTrue( - np.array_equal(predict, predict_infer), - "custom op predict: {},\n custom op infer predict: {}". - format(predict, predict_infer)) + np.testing.assert_array_equal( + predict, + predict_infer, + err_msg='custom op predict: {},\n custom op infer predict: {}' + .format(predict, predict_infer)) paddle.disable_static() def test_static_save_and_run_inference_predictor(self): @@ -331,14 +336,16 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): self.custom_ops[0], device, dtype, x) pd_out, pd_dx_grad = custom_relu_double_grad_dynamic( self.custom_ops[0], device, dtype, x, False) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'.format( out, pd_out)) - self.assertTrue( - np.array_equal(dx_grad, pd_dx_grad), - "custom op dx grad: {},\n paddle api dx grad: {}".format( - dx_grad, pd_dx_grad)) + np.testing.assert_array_equal( + dx_grad, + pd_dx_grad, + err_msg='custom op dx grad: {},\n paddle api dx grad: {}'. + format(dx_grad, pd_dx_grad)) def test_with_dataloader(self): for device in self.devices: @@ -357,9 +364,10 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): for batch_id, (image, _) in enumerate(train_loader()): out = self.custom_ops[0](image) pd_out = paddle.nn.functional.relu(image) - self.assertTrue( - np.array_equal(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( + np.testing.assert_array_equal( + out, + pd_out, + err_msg='custom op out: {},\n paddle api out: {}'.format( out, pd_out)) if batch_id == 5: diff --git a/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py b/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py index 4202545759c..e7d60fd4296 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_simple_slice.py @@ -46,9 +46,11 @@ class TestCustomSimpleSliceJit(unittest.TestCase): x = paddle.to_tensor(np_x) custom_op_out = custom_ops.custom_simple_slice(x, 2, 3) np_out = np_x[2:3] - self.assertTrue( - np.array_equal(custom_op_out, np_out), - "custom op: {},\n numpy: {}".format(np_out, custom_op_out.numpy())) + np.testing.assert_array_equal( + custom_op_out, + np_out, + err_msg='custom op: {},\n numpy: {}'.format(np_out, + custom_op_out.numpy())) def test_slice_output(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py b/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py index d48d25ea3b1..ff5192db7aa 100644 --- a/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_dispatch_jit.py @@ -47,9 +47,10 @@ class TestJitDispatch(unittest.TestCase): np_x = x.numpy() np_out = out.numpy() self.assertTrue(dtype in str(np_out.dtype)) - self.assertTrue( - np.array_equal(np_x, np_out), - "custom op x: {},\n custom op out: {}".format(np_x, np_out)) + np.testing.assert_array_equal( + np_x, + np_out, + err_msg='custom op x: {},\n custom op out: {}'.format(np_x, np_out)) def run_dispatch_test(self, func, dtype): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py 
b/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py index 83731de32a4..0a0a2e8e6e3 100644 --- a/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py +++ b/python/paddle/fluid/tests/custom_op/test_multi_out_jit.py @@ -70,14 +70,12 @@ class TestMultiOutputDtypes(unittest.TestCase): one_int32 = one_int32.numpy() # Fake_float64 self.assertTrue('float64' in str(zero_float64.dtype)) - self.assertTrue( - np.array_equal(zero_float64, - np.zeros([4, 8]).astype('float64'))) + np.testing.assert_array_equal(zero_float64, + np.zeros([4, 8]).astype('float64')) # ZFake_int32 self.assertTrue('int32' in str(one_int32.dtype)) - self.assertTrue( - np.array_equal(one_int32, - np.ones([4, 8]).astype('int32'))) + np.testing.assert_array_equal(one_int32, + np.ones([4, 8]).astype('int32')) def test_static(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py index dcdb7d2d120..bf1effe2191 100644 --- a/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py +++ b/python/paddle/fluid/tests/custom_runtime/test_custom_cpu_plugin.py @@ -144,21 +144,21 @@ class TestCustomCPUPlugin(unittest.TestCase): place=paddle.CPUPlace()) custom_cpu_tensor = cpu_tensor._copy_to( paddle.CustomPlace('custom_cpu', 0), True) - self.assertTrue(np.array_equal(custom_cpu_tensor, x)) + np.testing.assert_array_equal(custom_cpu_tensor, x) self.assertTrue(custom_cpu_tensor.place.is_custom_place()) # custom -> custom another_custom_cpu_tensor = custom_cpu_tensor._copy_to( paddle.CustomPlace('custom_cpu', 0), True) - self.assertTrue(np.array_equal(another_custom_cpu_tensor, x)) + np.testing.assert_array_equal(another_custom_cpu_tensor, x) self.assertTrue(another_custom_cpu_tensor.place.is_custom_place()) # custom -> cpu another_cpu_tensor = custom_cpu_tensor._copy_to(paddle.CPUPlace(), True) - self.assertTrue(np.array_equal(another_cpu_tensor, x)) + np.testing.assert_array_equal(another_cpu_tensor, x) self.assertTrue(another_cpu_tensor.place.is_cpu_place()) # custom -> custom self another_custom_cpu_tensor = another_custom_cpu_tensor._copy_to( paddle.CustomPlace('custom_cpu', 0), True) - self.assertTrue(np.array_equal(another_custom_cpu_tensor, x)) + np.testing.assert_array_equal(another_custom_cpu_tensor, x) self.assertTrue(another_custom_cpu_tensor.place.is_custom_place()) def _test_fallback_kernel(self): @@ -168,7 +168,7 @@ class TestCustomCPUPlugin(unittest.TestCase): x = paddle.to_tensor([5, 4, 3], 'int16') y = paddle.to_tensor([1, 2, 3], 'int16') z = paddle.add(x, y) - self.assertTrue(np.array_equal(z, r)) + np.testing.assert_array_equal(z, r) def tearDown(self): del os.environ['CUSTOM_DEVICE_ROOT'] diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 1d1bb5343a7..420f3d40aff 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -617,9 +617,9 @@ class TestGenerateProposals(LayerTest): roi_probs_dy = roi_probs.numpy() rois_num_dy = rois_num.numpy() - self.assertTrue(np.array_equal(np.array(rois_stat), rois_dy)) - self.assertTrue(np.array_equal(np.array(roi_probs_stat), roi_probs_dy)) - self.assertTrue(np.array_equal(np.array(rois_num_stat), rois_num_dy)) + np.testing.assert_array_equal(np.array(rois_stat), rois_dy) + np.testing.assert_array_equal(np.array(roi_probs_stat), roi_probs_dy) + np.testing.assert_array_equal(np.array(rois_num_stat), rois_num_dy) class 
TestYoloDetection(unittest.TestCase): @@ -837,8 +837,8 @@ class TestCollectFpnPropsals(LayerTest): fpn_rois_dy = fpn_rois_dy.numpy() rois_num_dy = rois_num_dy.numpy() - self.assertTrue(np.array_equal(fpn_rois_stat, fpn_rois_dy)) - self.assertTrue(np.array_equal(rois_num_stat, rois_num_dy)) + np.testing.assert_array_equal(fpn_rois_stat, fpn_rois_dy) + np.testing.assert_array_equal(rois_num_stat, rois_num_dy) def test_collect_fpn_proposals_error(self): @@ -932,7 +932,7 @@ class TestDistributeFpnProposals(LayerTest): output_dy_np.append(output_np) for res_stat, res_dy in zip(output_stat_np, output_dy_np): - self.assertTrue(np.array_equal(res_stat, res_dy)) + np.testing.assert_array_equal(res_stat, res_dy) def test_distribute_fpn_proposals_error(self): program = Program() diff --git a/python/paddle/fluid/tests/test_lod_tensor.py b/python/paddle/fluid/tests/test_lod_tensor.py index cc97b0eb5ae..eac7feb7775 100644 --- a/python/paddle/fluid/tests/test_lod_tensor.py +++ b/python/paddle/fluid/tests/test_lod_tensor.py @@ -71,11 +71,9 @@ class TestLoDTensor(unittest.TestCase): correct_recursive_seq_lens) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT64) self.assertEqual(tensor.shape(), [5, 1]) - self.assertTrue( - np.array_equal( - np.array(tensor), - np.array([1, 2, 3, 3, - 4]).reshape(tensor.shape()).astype('int64'))) + np.testing.assert_array_equal( + np.array(tensor), + np.array([1, 2, 3, 3, 4]).reshape(tensor.shape()).astype('int64')) # Create LoDTensor from numpy array data = np.random.random([10, 1]).astype('float64') @@ -85,7 +83,7 @@ class TestLoDTensor(unittest.TestCase): recursive_seq_lens) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP64) self.assertEqual(tensor.shape(), [10, 1]) - self.assertTrue(np.array_equal(np.array(tensor), data)) + np.testing.assert_array_equal(np.array(tensor), data) # Create LoDTensor from another LoDTensor, they are differnt instances new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]] @@ -133,9 +131,9 @@ class TestLoDTensor(unittest.TestCase): dltensor = tensor._to_dlpack() tensor_from_dlpack = fluid.core.from_dlpack(dltensor) self.assertTrue(isinstance(tensor_from_dlpack, fluid.core.Tensor)) - self.assertTrue( - np.array_equal(np.array(tensor_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int'))) + np.testing.assert_array_equal( + np.array(tensor_from_dlpack), + np.array([[1], [2], [3], [4]]).astype('int')) # when build with cuda if core.is_compiled_with_cuda(): gtensor = fluid.create_lod_tensor( @@ -144,9 +142,9 @@ class TestLoDTensor(unittest.TestCase): gdltensor = gtensor._to_dlpack() gtensor_from_dlpack = fluid.core.from_dlpack(gdltensor) self.assertTrue(isinstance(gtensor_from_dlpack, fluid.core.Tensor)) - self.assertTrue( - np.array_equal(np.array(gtensor_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int'))) + np.testing.assert_array_equal( + np.array(gtensor_from_dlpack), + np.array([[1], [2], [3], [4]]).astype('int')) def test_as_type(self): tensor = fluid.create_lod_tensor( diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py index b5ccf735ce2..65aec5ca8dd 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_operators.py @@ -164,7 +164,7 @@ class TestChooseShapeAttrOrApiWithLayer(unittest.TestCase): net = ShapeLayer() out = net(x) - self.assertTrue(np.array_equal(out.numpy(), 
x.numpy())) + np.testing.assert_array_equal(out.numpy(), x.numpy()) class TestIfElseNoValue(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py index dcc12e120d6..ecb2d97fa44 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_deepcopy.py @@ -36,7 +36,7 @@ class TestDeepCopy(unittest.TestCase): self.assertFalse(isinstance(net.forward, StaticFunction)) self.assertTrue(id(copy_net), id(copy_net.forward.__self__)) - self.assertTrue(np.array_equal(src_out.numpy(), copy_out.numpy())) + np.testing.assert_array_equal(src_out.numpy(), copy_out.numpy()) def test_func(self): st_foo = paddle.jit.to_static(foo) @@ -48,7 +48,7 @@ class TestDeepCopy(unittest.TestCase): new_foo = deepcopy(st_foo) self.assertFalse(isinstance(new_foo, StaticFunction)) new_out = new_foo(x) - self.assertTrue(np.array_equal(st_out.numpy(), new_out.numpy())) + np.testing.assert_array_equal(st_out.numpy(), new_out.numpy()) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py index 8ecae3c6b8d..560ae6b4ade 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py @@ -201,7 +201,7 @@ class TestPruneUnusedParamInProgram(unittest.TestCase): model.eval() input_ids = paddle.to_tensor(input_ids) out = model(input_ids) - self.assertTrue(np.array_equal(out.numpy(), [[15, 11]])) + np.testing.assert_array_equal(out.numpy(), [[15, 11]]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py index 5277a50c299..f949e9c0da7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_rollback.py @@ -82,7 +82,7 @@ class TestRollBackPlainFunction(unittest.TestCase): dy_out = st_foo(x) self.assertTrue(func_to_source_code(foo) == func_to_source_code(st_foo)) - self.assertTrue(np.array_equal(st_out.numpy(), dy_out.numpy())) + np.testing.assert_array_equal(st_out.numpy(), dy_out.numpy()) class TestRollBackNet(unittest.TestCase): @@ -111,15 +111,15 @@ class TestRollBackNet(unittest.TestCase): self.assertFalse(isinstance(net.forward, StaticFunction)) self.assertFalse("true_fn" in func_to_source_code(net.sub.forward)) dy_fwd_out = net(x) - self.assertTrue(np.array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy())) + np.testing.assert_array_equal(st_fwd_out.numpy(), dy_fwd_out.numpy()) # rollback infer into original dygraph method net.infer.rollback() self.assertFalse(isinstance(net.infer, StaticFunction)) self.assertFalse("true_fn" in func_to_source_code(net.sub.forward)) dy_infer_out = net.infer(x) - self.assertTrue( - np.array_equal(st_infer_out.numpy(), dy_infer_out.numpy())) + np.testing.assert_array_equal(st_infer_out.numpy(), + dy_infer_out.numpy()) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py index 48dc33cc6c7..7e8390d5443 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py +++ 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py @@ -208,8 +208,8 @@ class TestSliceSupplementSpecialCase(unittest.TestCase): out = exe.run(prog, feed={'x': array}, fetch_list=[z1, z2]) - self.assertTrue(np.array_equal(out[0], array[::2])) - self.assertTrue(np.array_equal(out[1], array[::-2])) + np.testing.assert_array_equal(out[0], array[::2]) + np.testing.assert_array_equal(out[1], array[::-2]) def test_static_slice_step_dygraph2static(self): paddle.disable_static() @@ -225,10 +225,10 @@ class TestSliceSupplementSpecialCase(unittest.TestCase): input_spec=[InputSpec(shape=[None, 4, 4])]) static_result = sfunc(inps) - self.assertTrue( - np.array_equal(origin_result[0].numpy(), static_result[0].numpy())) - self.assertTrue( - np.array_equal(origin_result[1].numpy(), static_result[1].numpy())) + np.testing.assert_array_equal(origin_result[0].numpy(), + static_result[0].numpy()) + np.testing.assert_array_equal(origin_result[1].numpy(), + static_result[1].numpy()) class TestPaddleStridedSlice(unittest.TestCase): @@ -268,10 +268,8 @@ class TestPaddleStridedSlice(unittest.TestCase): ends=e2, strides=stride2) - self.assertTrue( - np.array_equal( - sl.numpy(), array[s2[0]:e2[0]:stride2[0], - s2[1]:e2[1]:stride2[1]])) + np.testing.assert_array_equal( + sl.numpy(), array[s2[0]:e2[0]:stride2[0], s2[1]:e2[1]:stride2[1]]) array = np.arange(6 * 7 * 8).reshape((6, 7, 8)) pt = paddle.to_tensor(array) @@ -285,9 +283,10 @@ class TestPaddleStridedSlice(unittest.TestCase): strides=stride2) array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]] - self.assertTrue( - np.array_equal(sl.numpy(), array_slice), - msg="paddle.strided_slice:\n {} \n numpy slice:\n{}".format( + np.testing.assert_array_equal( + sl.numpy(), + array_slice, + err_msg='paddle.strided_slice:\n {} \n numpy slice:\n{}'.format( sl.numpy(), array_slice)) diff --git a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py index aa0290cf4b5..ef56e087f50 100644 --- a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py +++ b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_controlflow.py @@ -97,9 +97,9 @@ class TestCompatibility(unittest.TestCase): for x, y in zip(gt, res): if isinstance(x, list): for tx, ty in zip(x, y): - self.assertTrue(np.array_equal(tx, ty)) + np.testing.assert_array_equal(tx, ty) elif isinstance(x, np.ndarray): - self.assertTrue(np.array_equal(tx, ty)) + np.testing.assert_array_equal(x, y) else: raise Exception("Not Implement!") diff --git a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py index ad13061d178..75741f90aee 100644 --- a/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py +++ b/python/paddle/fluid/tests/unittests/interpreter/test_standalone_executor.py @@ -261,7 +261,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): res = self.run_new_executor(feed) gt = self.run_raw_executor(feed) for x, y in zip(gt, res): - self.assertTrue(np.array_equal(x, y)) + np.testing.assert_array_equal(x, y) def test_with_error(self): feed = [{'a': np.ones([2, 2], dtype="float32")}] @@ -277,7 +277,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): res = self.run_new_executor(feed, use_compiled=True) gt = self.run_raw_executor(feed, use_compiled=True) for x, y in zip(gt, res): - 
self.assertTrue(np.array_equal(x, y)) + np.testing.assert_array_equal(x, y) def test_compiled_program_convert_graph_to_program(self): data = np.ones([2, 2], dtype="float32") @@ -286,7 +286,7 @@ class SwitchExecutorInterfaceWithFeed(unittest.TestCase): res = self.run_new_executor(feed, use_compiled=True) gt = self.run_raw_executor(feed, use_compiled=True) for x, y in zip(gt, res): - self.assertTrue(np.array_equal(x, y)) + np.testing.assert_array_equal(x, y) def test_empty_program(self): program = paddle.static.Program() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py index aed58a352f4..b7ec5546976 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_scale_op_mlu.py @@ -185,7 +185,7 @@ class TestScaleApiStatic(unittest.TestCase): exe = paddle.static.Executor(place=paddle.CPUPlace()) out = exe.run(main_prog, feed={"x": input}, fetch_list=[out]) - self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out[0], input * 2.0 + 3.0) class TestScaleInplaceApiStatic(TestScaleApiStatic): @@ -204,7 +204,7 @@ class TestScaleApiDygraph(unittest.TestCase): input = np.random.random([2, 25]).astype("float32") x = paddle.to_tensor(input) out = self._executed_api(x, scale=2.0, bias=3.0) - self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py index bcb41283de9..db2a08a0312 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py @@ -356,13 +356,13 @@ class TestMoveAxis(unittest.TestCase): exe = paddle.static.Executor() out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] - self.assertEqual(np.array_equal(out_np, expected), True) + np.testing.assert_array_equal(out_np, expected) paddle.disable_static() x = paddle.to_tensor(x_np) out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0]) self.assertEqual(out.shape, [4, 2, 5, 7, 3]) - self.assertEqual(np.array_equal(out.numpy(), expected), True) + np.testing.assert_array_equal(out.numpy(), expected) paddle.enable_static() def test_moveaxis2(self): @@ -376,13 +376,13 @@ class TestMoveAxis(unittest.TestCase): exe = paddle.static.Executor() out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] - self.assertEqual(np.array_equal(out_np, expected), True) + np.testing.assert_array_equal(out_np, expected) paddle.disable_static() x = paddle.to_tensor(x_np) out = x.moveaxis(-2, -1) self.assertEqual(out.shape, [2, 5, 3]) - self.assertEqual(np.array_equal(out.numpy(), expected), True) + np.testing.assert_array_equal(out.numpy(), expected) paddle.enable_static() def test_error(self): diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index b0274431d45..dad503660ea 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -1048,12 +1048,13 @@ class OpTest(unittest.TestCase): str(expect_out) + "\n" + "But Got" + str(actual_out) + " in class " + self.__class__.__name__) else: - self.assertTrue( - np.array_equal(expect_out, actual_out), - "Output (" + name + ") has diff at " + str(place) + - " when using and not using inplace" + 
"\nExpect " + - str(expect_out) + "\n" + "But Got" + str(actual_out) + - " in class " + self.__class__.__name__ + '\n') + np.testing.assert_array_equal( + expect_out, + actual_out, + err_msg='Output (' + name + ') has diff at ' + str(place) + + ' when using and not using inplace' + '\nExpect ' + + str(expect_out) + '\n' + 'But Got' + str(actual_out) + + ' in class ' + self.__class__.__name__ + '\n') def _construct_grad_program_from_forward(self, fwd_program, grad_op_desc, op_grad_to_var): diff --git a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py index cbc32bbc4a1..9c6a9412518 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py @@ -51,7 +51,7 @@ class TestSparseSquareOp(unittest.TestCase): # get and compare result result_array = np.array(out_selected_rows.get_tensor()) - self.assertTrue(np.array_equal(result_array, np.square(np_array))) + np.testing.assert_array_equal(result_array, np.square(np_array)) def test_sparse_acti(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index d90ae197783..116924544fc 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -214,9 +214,9 @@ class TestAssignOApi(unittest.TestCase): y = clone_x**3 y.backward() - self.assertTrue(np.array_equal(x, [1, 1]), True) - self.assertTrue(np.array_equal(clone_x.grad.numpy(), [3, 3]), True) - self.assertTrue(np.array_equal(x.grad.numpy(), [3, 3]), True) + np.testing.assert_array_equal(x, [1, 1]) + np.testing.assert_array_equal(clone_x.grad.numpy(), [3, 3]) + np.testing.assert_array_equal(x.grad.numpy(), [3, 3]) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) paddle.enable_static() @@ -229,7 +229,7 @@ class TestAssignOApi(unittest.TestCase): feed={'X': x_np}, fetch_list=[clone_x])[0] - self.assertTrue(np.array_equal(y_np, x_np), True) + np.testing.assert_array_equal(y_np, x_np) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py index 681e1893b02..6c6c26a8c6a 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import op_test import paddle @@ -39,7 +39,7 @@ class TestAssignValueOp(op_test.OpTest): self.outputs = {"Out": self.value} def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(numpy.float32) + self.value = np.random.random(size=(2, 5)).astype(np.float32) self.attrs["fp32_values"] = [float(v) for v in self.value.flat] def test_forward(self): @@ -49,22 +49,22 @@ class TestAssignValueOp(op_test.OpTest): class TestAssignValueOp2(TestAssignValueOp): def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32) + self.value = np.random.random(size=(2, 5)).astype(np.int32) self.attrs["int32_values"] = [int(v) for v in self.value.flat] class TestAssignValueOp3(TestAssignValueOp): def init_data(self): - self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64) + self.value = np.random.random(size=(2, 5)).astype(np.int64) self.attrs["int64_values"] = [int(v) for v in self.value.flat] class 
TestAssignValueOp4(TestAssignValueOp): def init_data(self): - self.value = numpy.random.choice(a=[False, True], - size=(2, 5)).astype(numpy.bool) + self.value = np.random.choice(a=[False, True], + size=(2, 5)).astype(np.bool) self.attrs["bool_values"] = [int(v) for v in self.value.flat] @@ -72,7 +72,7 @@ class TestAssignApi(unittest.TestCase): def setUp(self): self.init_dtype() - self.value = (-100 + 200 * numpy.random.random(size=(2, 5))).astype( + self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype( self.dtype) self.place = fluid.CUDAPlace( 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() @@ -88,8 +88,10 @@ class TestAssignApi(unittest.TestCase): exe = fluid.Executor(self.place) [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x]) - self.assertTrue(numpy.array_equal(fetched_x, self.value), - "fetch_x=%s val=%s" % (fetched_x, self.value)) + np.testing.assert_array_equal(fetched_x, + self.value, + err_msg='fetch_x=%s val=%s' % + (fetched_x, self.value)) self.assertEqual(fetched_x.dtype, self.value.dtype) @@ -109,8 +111,8 @@ class TestAssignApi4(TestAssignApi): def setUp(self): self.init_dtype() - self.value = numpy.random.choice(a=[False, True], - size=(2, 5)).astype(numpy.bool) + self.value = np.random.choice(a=[False, True], + size=(2, 5)).astype(np.bool) self.place = fluid.CUDAPlace( 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index dbfb1844fb0..bf21d29caa0 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -371,7 +371,7 @@ class TestBuffer(unittest.TestCase): self.func_test_buffer_state_dict() def assert_var_base_equal(self, var1, var2): - self.assertTrue(np.array_equal(var1.numpy(), var2.numpy())) + np.testing.assert_array_equal(var1.numpy(), var2.numpy()) class BufferNetWithModification(paddle.nn.Layer): @@ -414,8 +414,8 @@ class TestModifiedBuffer(unittest.TestCase): st_outs = self._run(True) for i in range(len(dy_outs)): - self.assertTrue( - np.array_equal(dy_outs[i].numpy(), st_outs[i].numpy())) + np.testing.assert_array_equal(dy_outs[i].numpy(), + st_outs[i].numpy()) def test_modified(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py index b3206e385f4..59a3ff34a02 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py @@ -91,9 +91,8 @@ class TestBeamSearchDecodeOp(unittest.TestCase): expected_data = np.array( [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64") - self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data)) - self.assertTrue(np.array_equal(np.array(sentence_scores), - expected_data)) + np.testing.assert_array_equal(np.array(sentence_ids), expected_data) + np.testing.assert_array_equal(np.array(sentence_scores), expected_data) @unittest.skipIf(not core.is_compiled_with_cuda(), diff --git a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py index 4982ed451cd..8ab6968eb21 100644 --- a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py +++ b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py @@ -90,7 +90,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index1), 8582429431) 
self.assertEqual(np.sum(index2), 8581445798) expect = [0., 0., 0., 0., 0., 0., 0., 1., 1., 1.] - self.assertTrue(np.array_equal(y[16, 500, 500:510], expect)) + np.testing.assert_array_equal(y[16, 500, 500:510], expect) x = paddle.to_tensor(x_np, dtype='float32') y = paddle.bernoulli(x).numpy() @@ -99,7 +99,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index1), 8583509076) self.assertEqual(np.sum(index2), 8582778540) expect = [0., 0., 1., 1., 1., 1., 0., 1., 1., 1.] - self.assertTrue(np.array_equal(y[16, 500, 500:510], expect)) + np.testing.assert_array_equal(y[16, 500, 500:510], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py index ffc17318472..f88f0506ddd 100644 --- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py +++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py @@ -118,9 +118,11 @@ class InplaceTestBase(unittest.TestCase): fetch_val2, = exe.run(compiled_prog, feed=feed_dict, fetch_list=[fetch_var]) - self.assertTrue( - np.array_equal(fetch_val1, fetch_val2), - "error var name: {}, fetch_val1: {}, fetch_val2: {}" + np.testing.assert_array_equal( + fetch_val1, + fetch_val2, + err_msg= + 'error var name: {}, fetch_val1: {}, fetch_val2: {}' .format( fetch_var, fetch_val1[~np.equal(fetch_val1, fetch_val2)], @@ -167,13 +169,14 @@ class InplaceTestBase(unittest.TestCase): fetch_vals.append(fetch_val) for item in fetch_vals: - self.assertTrue(np.array_equal(fetch_vals[0], item)) - self.assertTrue( - np.array_equal(fetch_vals[0], item), - "error var name: {}, fetch_vals[0]: {}, item: {}". - format(fetch_var, - fetch_vals[0][~np.equal(fetch_vals[0], item)], - item[~np.equal(fetch_vals[0], item)])) + np.testing.assert_array_equal(fetch_vals[0], item) + np.testing.assert_array_equal( + fetch_vals[0], + item, + err_msg='error var name: {}, fetch_vals[0]: {}, item: {}' + .format(fetch_var, + fetch_vals[0][~np.equal(fetch_vals[0], item)], + item[~np.equal(fetch_vals[0], item)])) class CUDAInplaceTest(InplaceTestBase): diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 92eb3589625..f45263bab3c 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -100,7 +100,7 @@ class TestGradientWithPrune(unittest.TestCase): out = exe.run(main, feed={'x': np.ones([3]).astype('float32')}, fetch_list=[x1_grad]) - self.assertTrue(np.array_equal(out[0], [2., 0., 0.])) + np.testing.assert_array_equal(out[0], [2.0, 0.0, 0.0]) class TestDoubleGradient(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index 6e9c9bcd147..cd67440990c 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -130,11 +130,10 @@ class TestCastOpEager(unittest.TestCase): x = paddle.ones([2, 2], dtype="float16") x.stop_gradient = False out = paddle.cast(x, "float32") - self.assertTrue( - np.array_equal(out, - np.ones([2, 2]).astype("float32"))) + np.testing.assert_array_equal(out, + np.ones([2, 2]).astype('float32')) out.backward() - self.assertTrue(np.array_equal(x.gradient(), x.numpy())) + np.testing.assert_array_equal(x.gradient(), x.numpy()) self.assertTrue(x.gradient().dtype == np.float16) 
diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index fab70b2c6ad..1ca2034625b 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -68,7 +68,7 @@ class TestCompiledProgram(unittest.TestCase): "label": self.label }, fetch_list=[loss.name]) - self.assertTrue(np.array_equal(loss_data[0], self.loss)) + np.testing.assert_array_equal(loss_data[0], self.loss) def test_compiled_program_with_data_parallel(self): with new_program_scope(): @@ -90,7 +90,7 @@ class TestCompiledProgram(unittest.TestCase): "label": self.label }, fetch_list=[loss.name]) - self.assertTrue(np.array_equal(loss_data[0], self.loss)) + np.testing.assert_array_equal(loss_data[0], self.loss) class TestCompiledProgramError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 130a7e8833b..0bf3d6230d8 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -447,10 +447,8 @@ class TestConcatAPIWithLoDTensorArray(unittest.TestCase): self.assertTrue(self.out_var.shape[self.axis] == -1) exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) - self.assertTrue( - np.array_equal( - res[0], np.concatenate([self.x] * self.iter_num, - axis=self.axis))) + np.testing.assert_array_equal( + res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_conj_op.py b/python/paddle/fluid/tests/unittests/test_conj_op.py index a3b3f243260..d1b8de82bee 100644 --- a/python/paddle/fluid/tests/unittests/test_conj_op.py +++ b/python/paddle/fluid/tests/unittests/test_conj_op.py @@ -84,7 +84,7 @@ class TestComplexConjOp(unittest.TestCase): var_x = paddle.to_tensor(input) result = paddle.conj(var_x).numpy() target = np.conj(input) - self.assertTrue(np.array_equal(result, target)) + np.testing.assert_array_equal(result, target) def test_conj_operator(self): for dtype in self._dtypes: @@ -96,7 +96,7 @@ class TestComplexConjOp(unittest.TestCase): var_x = paddle.to_tensor(input) result = var_x.conj().numpy() target = np.conj(input) - self.assertTrue(np.array_equal(result, target)) + np.testing.assert_array_equal(result, target) def test_conj_static_mode(self): @@ -118,7 +118,7 @@ class TestComplexConjOp(unittest.TestCase): exe = static.Executor(place) out_value = exe.run(feed=input_dict, fetch_list=[out.name]) - self.assertTrue(np.array_equal(np_res, out_value[0])) + np.testing.assert_array_equal(np_res, out_value[0]) def test_conj_api_real_number(self): for dtype in self._dtypes: @@ -128,7 +128,7 @@ class TestComplexConjOp(unittest.TestCase): var_x = paddle.to_tensor(input) result = paddle.conj(var_x).numpy() target = np.conj(input) - self.assertTrue(np.array_equal(result, target)) + np.testing.assert_array_equal(result, target) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph.py b/python/paddle/fluid/tests/unittests/test_cuda_graph.py index 446a5500bc3..0b795d4c0eb 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph.py @@ -174,10 +174,10 @@ class TestCUDAGraph(unittest.TestCase): y_np = y.numpy() y_np_expected = np.concatenate(xs_np) - self.assertTrue(np.array_equal(y_np, y_np_expected)) + 
np.testing.assert_array_equal(y_np, y_np_expected) self.assertEqual(len(zs), len(xs_np)) for i, z in enumerate(zs): - self.assertTrue(np.array_equal(z.numpy(), xs_np[i])) + np.testing.assert_array_equal(z.numpy(), xs_np[i]) output_dir = 'cuda_graph_dot_{}'.format(os.getpid()) try: @@ -233,8 +233,8 @@ class TestCUDAGraph(unittest.TestCase): graph.replay() actual_x = np.array([[i]]).astype(dtype) actual_y = np.array([[i * i]]).astype(dtype) - self.assertTrue(np.array_equal(actual_x, x.numpy())) - self.assertTrue(np.array_equal(actual_y, y.numpy())) + np.testing.assert_array_equal(actual_x, x.numpy()) + np.testing.assert_array_equal(actual_y, y.numpy()) def test_dev_ctx_alloc(self): if not can_use_cuda_graph(): diff --git a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py index b0e6878e3fe..ecd3f406e08 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_graph_partial_graph.py @@ -68,9 +68,9 @@ class TestSimpleModel(unittest.TestCase): layer, value2 = self.run_base(func, True, "default") _, value3 = self.run_base(func, True, "new") _, value4 = self.run_base(func, True, layer) - self.assertTrue(np.array_equal(value1, value2)) - self.assertTrue(np.array_equal(value1, value3)) - self.assertTrue(np.array_equal(value1, value4)) + np.testing.assert_array_equal(value1, value2) + np.testing.assert_array_equal(value1, value3) + np.testing.assert_array_equal(value1, value4) def test_layer(self): self.check(SimpleModel(10, 20)) diff --git a/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py b/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py index 5405ca19806..87c4f6cee5b 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_stream_event.py @@ -128,7 +128,7 @@ class TestStreamGuard(unittest.TestCase): # kernels to be completed on windows. 
s.synchronize() - self.assertTrue(np.array_equal(np.array(c), np.array(d))) + np.testing.assert_array_equal(np.array(c), np.array(d)) def test_stream_guard_default_stream(self): if paddle.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 7e11ad647d9..1989a8c1448 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -31,15 +31,15 @@ class TestCumsumOp(unittest.TestCase): y = paddle.cumsum(data) z = np.cumsum(data_np) - self.assertTrue(np.array_equal(z, y.numpy())) + np.testing.assert_array_equal(z, y.numpy()) y = paddle.cumsum(data, axis=0) z = np.cumsum(data_np, axis=0) - self.assertTrue(np.array_equal(z, y.numpy())) + np.testing.assert_array_equal(z, y.numpy()) y = paddle.cumsum(data, axis=-1) z = np.cumsum(data_np, axis=-1) - self.assertTrue(np.array_equal(z, y.numpy())) + np.testing.assert_array_equal(z, y.numpy()) y = paddle.cumsum(data, dtype='float64') self.assertTrue(y.dtype == core.VarDesc.VarType.FP64) @@ -49,7 +49,7 @@ class TestCumsumOp(unittest.TestCase): y = paddle.cumsum(data, axis=-2) z = np.cumsum(data_np, axis=-2) - self.assertTrue(np.array_equal(z, y.numpy())) + np.testing.assert_array_equal(z, y.numpy()) def run_static(self, use_gpu=False): with fluid.program_guard(fluid.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py index e2062238b11..2f7a6dd5a42 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py @@ -93,8 +93,8 @@ class TestClass(unittest.TestCase): L1 = np.array(L1) L2 = np.array(L2) - self.assertTrue(np.array_equal(I1, I2)) - self.assertTrue(np.array_equal(L1, L2)) + np.testing.assert_array_equal(I1, I2) + np.testing.assert_array_equal(L1, L2) batch_id += 1 if break_beforehand and batch_id >= int( diff --git a/python/paddle/fluid/tests/unittests/test_dot_op.py b/python/paddle/fluid/tests/unittests/test_dot_op.py index ffdc90dd986..1ce352251c1 100644 --- a/python/paddle/fluid/tests/unittests/test_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_dot_op.py @@ -138,9 +138,8 @@ class TestDygraph(unittest.TestCase): np.array([[1, 3], [3, 5]]).astype(np.float32)) y1 = fluid.dygraph.to_variable( np.array([[2, 5], [6, 8]]).astype(np.float32)) - self.assertTrue( - np.array_equal( - paddle.dot(x1, y1).numpy(), np.array([[17], [58]]))) + np.testing.assert_array_equal( + paddle.dot(x1, y1).numpy(), np.array([[17], [58]])) class TestComplexDotOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index 30c4201a0b9..a5f33288362 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -1013,10 +1013,9 @@ class TestDropoutBackward(unittest.TestCase): out, mask = core.ops.dropout(input, 'dropout_prob', 0.5) out.backward() - self.assertTrue( - np.array_equal( - input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy()))) + np.testing.assert_array_equal( + input.gradient(), + self.cal_grad_downscale_in_infer(mask.numpy())) def test_backward_downscale_in_infer_eager(self): for place in self.places: @@ -1027,10 +1026,9 @@ class TestDropoutBackward(unittest.TestCase): out, mask = 
_C_ops.final_state_dropout( input, None, 0.5, False, "downgrade_in_infer", 0, False) out.backward() - self.assertTrue( - np.array_equal( - input.gradient(), - self.cal_grad_downscale_in_infer(mask.numpy()))) + np.testing.assert_array_equal( + input.gradient(), + self.cal_grad_downscale_in_infer(mask.numpy())) def test_backward_upscale_train(self): _enable_legacy_dygraph() diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index 167748c5a98..22fb98a7c61 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -82,7 +82,7 @@ class TestDynRNNStopGradient(unittest.TestCase): value2 = build_and_run_program(place, self.batch_size, self.beam_size, True) - self.assertTrue(np.array_equal(value1, value2)) + np.testing.assert_array_equal(value1, value2) def test_check_main(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_eager_run_program.py b/python/paddle/fluid/tests/unittests/test_eager_run_program.py index 8d3ebcfbac5..a04c544e902 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_run_program.py +++ b/python/paddle/fluid/tests/unittests/test_eager_run_program.py @@ -108,11 +108,11 @@ class TestRunProgram(unittest.TestCase): loss = paddle.mean(out_t) loss.backward() - self.assertTrue(np.array_equal(np.ones([2, 2]) * 4, out_t.numpy())) - self.assertTrue( - np.array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy())) - self.assertTrue( - np.array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy())) + np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy()) + np.testing.assert_array_equal( + np.ones([2, 4]) * 0.5, x_t.grad.numpy()) + np.testing.assert_array_equal( + np.ones([4, 2]) * 0.5, y_t.grad.numpy()) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py index 4afbe2d7155..26702d682d1 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py @@ -32,7 +32,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): out_arr = out.numpy() out_arr_expected = np.add(np_x, np_y) - self.assertTrue(np.array_equal(out_arr, out_arr_expected)) + np.testing.assert_array_equal(out_arr, out_arr_expected) def test_sum(self): with _test_eager_guard(): @@ -42,7 +42,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): out = paddle.sum(x, axis=0) out_arr = out.numpy() out_arr_expected = np.sum(x_data, axis=0) - self.assertTrue(np.array_equal(out_arr, out_arr_expected)) + np.testing.assert_array_equal(out_arr, out_arr_expected) def test_mm(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_egr_python_api.py b/python/paddle/fluid/tests/unittests/test_egr_python_api.py index 7fe755225f4..5a678075a11 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_python_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_python_api.py @@ -54,7 +54,7 @@ class EagerScaleTestCase(unittest.TestCase): self.assertIsNone(data_eager.grad) out_eager.backward(grad_eager, False) self.assertIsNotNone(data_eager.grad) - self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data)) + np.testing.assert_array_equal(data_eager.grad.numpy(), input_data) def test_retain_grad_and_run_backward_raises(self): with _test_eager_guard(): 
@@ -92,7 +92,7 @@ class EagerDtypeTestCase(unittest.TestCase): arr = np.random.random([4, 16, 16, 32]).astype(dtype) tensor = paddle.to_tensor(arr, dtype) self.assertEqual(tensor.dtype, proto_dtype) - self.assertTrue(np.array_equal(arr, tensor.numpy())) + np.testing.assert_array_equal(arr, tensor.numpy()) def test_dtype_base(self): print("Test_dtype") @@ -138,7 +138,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor1.stop_gradient, False) self.assertTrue(egr_tensor1.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0)) + np.testing.assert_array_equal(egr_tensor1.numpy(), arr0) arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64) egr_tensor2 = core.eager.Tensor(arr1, place, False, True, @@ -149,7 +149,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64) self.assertEqual(egr_tensor2.stop_gradient, True) self.assertTrue(egr_tensor2.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1)) + np.testing.assert_array_equal(egr_tensor2.numpy(), arr1) arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32') egr_tensor3 = core.eager.Tensor(arr2) @@ -161,7 +161,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( egr_tensor3.place._equals( paddle.fluid.framework._current_expected_place())) - self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2)) + np.testing.assert_array_equal(egr_tensor3.numpy(), arr2) egr_tensor3.stop_gradient = False egr_tensor4 = core.eager.Tensor(egr_tensor3) @@ -173,8 +173,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( egr_tensor4.place._equals( paddle.fluid.framework._current_expected_place())) - self.assertTrue(np.array_equal(egr_tensor4.numpy(), - egr_tensor3.numpy())) + np.testing.assert_array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()) arr4 = np.random.rand(4, 16, 16, 32).astype('float32') egr_tensor5 = core.eager.Tensor(arr4, place) @@ -184,7 +183,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.stop_gradient, True) self.assertTrue(egr_tensor5.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4)) + np.testing.assert_array_equal(egr_tensor5.numpy(), arr4) egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace()) self.assertEqual(egr_tensor6.persistable, False) @@ -193,8 +192,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.stop_gradient, True) self.assertEqual(egr_tensor6.place.is_cpu_place(), True) - self.assertTrue(np.array_equal(egr_tensor6.numpy(), - egr_tensor5.numpy())) + np.testing.assert_array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()) egr_tensor7 = core.eager.Tensor(arr4, place, True) self.assertEqual(egr_tensor7.persistable, True) @@ -203,7 +201,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.stop_gradient, True) self.assertTrue(egr_tensor7.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4)) + np.testing.assert_array_equal(egr_tensor7.numpy(), arr4) egr_tensor8 = core.eager.Tensor(egr_tensor6, place, 
"egr_tensor8") self.assertEqual(egr_tensor8.persistable, False) @@ -212,8 +210,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.stop_gradient, True) self.assertTrue(egr_tensor8.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor8.numpy(), - egr_tensor5.numpy())) + np.testing.assert_array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()) egr_tensor9 = core.eager.Tensor(arr4, place, True, True) self.assertEqual(egr_tensor9.persistable, True) @@ -222,7 +219,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.stop_gradient, True) self.assertTrue(egr_tensor9.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4)) + np.testing.assert_array_equal(egr_tensor9.numpy(), arr4) x = np.random.rand(3, 3).astype('float32') t = paddle.fluid.Tensor() @@ -234,7 +231,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.stop_gradient, True) self.assertTrue(egr_tensor10.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor10.numpy(), x)) + np.testing.assert_array_equal(egr_tensor10.numpy(), x) egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed") self.assertEqual(egr_tensor11.persistable, False) @@ -243,7 +240,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor11.stop_gradient, True) self.assertTrue(egr_tensor11.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor11.numpy(), x)) + np.testing.assert_array_equal(egr_tensor11.numpy(), x) egr_tensor12 = core.eager.Tensor(t) self.assertEqual(egr_tensor12.persistable, False) @@ -252,7 +249,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) - self.assertTrue(np.array_equal(egr_tensor12.numpy(), x)) + np.testing.assert_array_equal(egr_tensor12.numpy(), x) with self.assertRaisesRegexp( ValueError, "The shape of Parameter should not be None"): @@ -489,8 +486,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( egr_tensor15.place._equals( paddle.fluid.framework._current_expected_place())) - self.assertTrue( - np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())) + np.testing.assert_array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()) egr_tensor16 = core.eager.Tensor(value=egr_tensor4, name="new_eager_tensor") @@ -502,8 +498,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( egr_tensor16.place._equals( paddle.fluid.framework._current_expected_place())) - self.assertTrue( - np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())) + np.testing.assert_array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()) egr_tensor17 = core.eager.Tensor( value=egr_tensor4, @@ -516,8 +511,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor17.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor17.stop_gradient, True) self.assertTrue(egr_tensor17.place._equals(place)) - self.assertTrue( - np.array_equal(egr_tensor17.numpy(), 
egr_tensor4.numpy())) + np.testing.assert_array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()) egr_tensor18 = core.eager.Tensor( egr_tensor4, @@ -530,8 +524,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor18.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor18.stop_gradient, True) self.assertTrue(egr_tensor18.place._equals(place)) - self.assertTrue( - np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())) + np.testing.assert_array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()) egr_tensor19 = core.eager.Tensor( egr_tensor4, @@ -544,8 +537,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor19.dtype, egr_tensor4.dtype) self.assertEqual(egr_tensor19.stop_gradient, True) self.assertTrue(egr_tensor19.place._equals(place)) - self.assertTrue( - np.array_equal(egr_tensor19.numpy(), egr_tensor4.numpy())) + np.testing.assert_array_equal(egr_tensor19.numpy(), egr_tensor4.numpy()) # init eager tensor by framework tensor x = np.random.rand(3, 3).astype('float32') @@ -560,7 +552,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertTrue( egr_tensor20.place._equals( paddle.fluid.framework._current_expected_place())) - self.assertTrue(np.array_equal(egr_tensor20.numpy(), x)) + np.testing.assert_array_equal(egr_tensor20.numpy(), x) egr_tensor21 = core.eager.Tensor(value=t, place=place) self.assertEqual(egr_tensor21.persistable, False) @@ -569,7 +561,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor21.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor21.stop_gradient, True) self.assertTrue(egr_tensor21.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor21.numpy(), x)) + np.testing.assert_array_equal(egr_tensor21.numpy(), x) egr_tensor22 = core.eager.Tensor(t, place=place) self.assertEqual(egr_tensor22.persistable, False) @@ -578,7 +570,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor22.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor22.stop_gradient, True) self.assertTrue(egr_tensor22.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor22.numpy(), x)) + np.testing.assert_array_equal(egr_tensor22.numpy(), x) egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor") self.assertEqual(egr_tensor23.persistable, False) @@ -587,7 +579,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor23.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor23.stop_gradient, True) self.assertTrue(egr_tensor23.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor23.numpy(), x)) + np.testing.assert_array_equal(egr_tensor23.numpy(), x) egr_tensor24 = core.eager.Tensor(value=t, place=place, @@ -598,7 +590,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor24.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor24.stop_gradient, True) self.assertTrue(egr_tensor24.place._equals(place)) - self.assertTrue(np.array_equal(egr_tensor24.numpy(), x)) + np.testing.assert_array_equal(egr_tensor24.numpy(), x) # Bad usage # SyntaxError: positional argument follows keyword argument @@ -632,53 +624,53 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): core.CPUPlace()) tensor1.persistable = True self.assertEqual(tensor1.stop_gradient, True) - self.assertTrue(np.array_equal(tensor.numpy(), arr)) + 
np.testing.assert_array_equal(tensor.numpy(), arr) print("Test copy_") tensor.copy_(tensor1, True) self.assertEqual(tensor.persistable, False) self.assertEqual(tensor.shape, [4, 16]) self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32) - self.assertTrue(np.array_equal(tensor.numpy(), arr1)) + np.testing.assert_array_equal(tensor.numpy(), arr1) print("Test _copy_to") tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CPUPlace()) - self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) + np.testing.assert_array_equal(tensor2.numpy(), arr2) self.assertTrue(tensor2.place.is_cpu_place()) tensor2.persistable = True tensor2.stop_gradient = False if core.is_compiled_with_cuda(): tensor3 = tensor2._copy_to(core.CUDAPlace(0), True) - self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) + np.testing.assert_array_equal(tensor3.numpy(), arr2) self.assertEqual(tensor3.persistable, True) self.assertEqual(tensor3.stop_gradient, True) self.assertTrue(tensor3.place.is_gpu_place()) tensor4 = tensor2.cuda(0, True) - self.assertTrue(np.array_equal(tensor4.numpy(), arr2)) + np.testing.assert_array_equal(tensor4.numpy(), arr2) self.assertEqual(tensor4.persistable, True) self.assertEqual(tensor4.stop_gradient, False) self.assertTrue(tensor4.place.is_gpu_place()) tensor5 = tensor4.cpu() - self.assertTrue(np.array_equal(tensor5.numpy(), arr2)) + np.testing.assert_array_equal(tensor5.numpy(), arr2) self.assertEqual(tensor5.persistable, True) self.assertEqual(tensor5.stop_gradient, False) self.assertTrue(tensor5.place.is_cpu_place()) tensor10 = paddle.to_tensor([1, 2, 3], place='gpu_pinned') tensor11 = tensor10._copy_to(core.CUDAPlace(0), True) - self.assertTrue( - np.array_equal(tensor10.numpy(), tensor11.numpy())) + np.testing.assert_array_equal(tensor10.numpy(), + tensor11.numpy()) else: tensor3 = tensor2._copy_to(core.CPUPlace(), True) - self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) + np.testing.assert_array_equal(tensor3.numpy(), arr2) self.assertEqual(tensor3.persistable, True) self.assertEqual(tensor3.stop_gradient, True) self.assertTrue(tensor3.place.is_cpu_place()) tensor4 = tensor2.cpu() - self.assertTrue(np.array_equal(tensor4.numpy(), arr2)) + np.testing.assert_array_equal(tensor4.numpy(), arr2) self.assertEqual(tensor4.persistable, True) self.assertEqual(tensor4.stop_gradient, False) self.assertTrue(tensor4.place.is_cpu_place()) @@ -700,15 +692,15 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): else: tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, core.CPUPlace()) - self.assertTrue(np.array_equal(tensor.numpy(), arr)) - self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) + np.testing.assert_array_equal(tensor.numpy(), arr) + np.testing.assert_array_equal(tensor2.numpy(), arr2) tensor2._share_buffer_to(tensor) - self.assertTrue(np.array_equal(tensor.numpy(), arr2)) - self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) + np.testing.assert_array_equal(tensor.numpy(), arr2) + np.testing.assert_array_equal(tensor2.numpy(), arr2) self.assertTrue(tensor._is_shared_buffer_with(tensor2)) self.assertTrue(tensor2._is_shared_buffer_with(tensor)) tensor._share_buffer_to(tensor3) - self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) + np.testing.assert_array_equal(tensor3.numpy(), arr2) self.assertTrue(tensor3._is_shared_buffer_with(tensor)) def test_share_underline_tensor_to(self): @@ -728,15 +720,15 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): else: tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, 
core.CPUPlace()) - self.assertTrue(np.array_equal(tensor.numpy(), arr)) - self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) + np.testing.assert_array_equal(tensor.numpy(), arr) + np.testing.assert_array_equal(tensor2.numpy(), arr2) tensor2._share_underline_tensor_to(tensor) - self.assertTrue(np.array_equal(tensor.numpy(), arr2)) - self.assertTrue(np.array_equal(tensor2.numpy(), arr2)) + np.testing.assert_array_equal(tensor.numpy(), arr2) + np.testing.assert_array_equal(tensor2.numpy(), arr2) self.assertTrue(tensor._is_shared_underline_tensor_with(tensor2)) self.assertTrue(tensor2._is_shared_underline_tensor_with(tensor)) tensor._share_underline_tensor_to(tensor3) - self.assertTrue(np.array_equal(tensor3.numpy(), arr2)) + np.testing.assert_array_equal(tensor3.numpy(), arr2) self.assertTrue(tensor3._is_shared_underline_tensor_with(tensor)) def test_properties(self): @@ -810,7 +802,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): egr_tensor = core.eager.Tensor(value=ori_arr) self.assertEqual(egr_tensor.stop_gradient, True) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) - self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr)) + np.testing.assert_array_equal(egr_tensor.numpy(), ori_arr) ori_place = egr_tensor.place new_arr = np.random.rand(4, 16, 16, 32).astype('float32') @@ -820,7 +812,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): self.assertEqual(egr_tensor.stop_gradient, True) self.assertTrue(egr_tensor.place._equals(ori_place)) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) - self.assertTrue(np.array_equal(egr_tensor.numpy(), new_arr)) + np.testing.assert_array_equal(egr_tensor.numpy(), new_arr) def test_sharding_related_api(self): with _test_eager_guard(): @@ -839,7 +831,7 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase): out = x + x out.backward() x._copy_gradient_from(y) - self.assertTrue(np.array_equal(x.grad.numpy(), np_y)) + np.testing.assert_array_equal(x.grad.numpy(), np_y) def test_clear(self): with _test_eager_guard(): @@ -862,11 +854,10 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): linear = paddle.nn.Linear(1, 3) linear_copy = copy.deepcopy(linear) linear_copy2 = linear.weight._copy_to(core.CPUPlace(), True) - self.assertTrue( - np.array_equal(linear.weight.numpy(), - linear_copy.weight.numpy())) - self.assertTrue( - np.array_equal(linear.weight.numpy(), linear_copy2.numpy())) + np.testing.assert_array_equal(linear.weight.numpy(), + linear_copy.weight.numpy()) + np.testing.assert_array_equal(linear.weight.numpy(), + linear_copy2.numpy()) def func_fp16_initilaizer(self): paddle.set_default_dtype("float16") @@ -905,7 +896,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): res2 = self.func_fp16_initilaizer() for i in range(len(res1)): - self.assertTrue(np.array_equal(res1[i], res2[i])) + np.testing.assert_array_equal(res1[i], res2[i]) def func_layer_helper_base(self, value): base = paddle.fluid.layer_helper_base.LayerHelperBase( @@ -924,8 +915,8 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): res3 = self.func_base_to_variable(value) res2 = self.func_layer_helper_base(value) res4 = self.func_base_to_variable(value) - self.assertTrue(np.array_equal(res1, res2)) - self.assertTrue(np.array_equal(res3, res4)) + np.testing.assert_array_equal(res1, res2) + np.testing.assert_array_equal(res3, res4) def test_backward_with_single_tensor(self): with _test_eager_guard(): @@ -939,11 +930,11 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): 
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) - self.assertTrue(np.array_equal(egr_tensor12.numpy(), arr4)) - self.assertTrue(np.array_equal(egr_tensor12.gradient(), None)) + np.testing.assert_array_equal(egr_tensor12.numpy(), arr4) + np.testing.assert_array_equal(egr_tensor12.gradient(), None) egr_tensor12.stop_gradient = False egr_tensor12.backward() - self.assertTrue(np.array_equal(egr_tensor12.gradient(), arr)) + np.testing.assert_array_equal(egr_tensor12.gradient(), arr) def test_set_value(self): with _test_eager_guard(): @@ -953,7 +944,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): self.assertFalse(np.array_equal(linear.weight.numpy(), new_weight)) linear.weight.set_value(new_weight) - self.assertTrue(np.array_equal(linear.weight.numpy(), new_weight)) + np.testing.assert_array_equal(linear.weight.numpy(), new_weight) self.assertTrue(linear.weight.place._equals(ori_place)) diff --git a/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py b/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py index 3b5ec683bc7..64f8d5bbedc 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_string_tensor_api.py @@ -40,28 +40,28 @@ class EagerStringTensorTestCase(unittest.TestCase): ST2 = core.eager.StringTensor(shape, "ST2") # constructor 2 self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.shape, shape) - self.assertTrue( - np.array_equal(ST2.numpy(), np.empty(shape, dtype=np.unicode_))) + np.testing.assert_array_equal(ST2.numpy(), + np.empty(shape, dtype=np.unicode_)) ST3 = core.eager.StringTensor(self.str_arr, "ST3") # constructor 3 self.assertEqual(ST3.name, "ST3") self.assertEqual(ST3.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST3.numpy(), self.str_arr) ST4 = core.eager.StringTensor(self.str_arr) # constructor 4 self.assertEqual(ST4.name, "generated_string_tensor_1") self.assertEqual(ST4.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST4.numpy(), self.str_arr) ST5 = core.eager.StringTensor(ST4) # constructor 5 self.assertEqual(ST5.name, "generated_string_tensor_2") self.assertEqual(ST5.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST5.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST5.numpy(), self.str_arr) ST6 = core.eager.StringTensor(ST5, "ST6") # constructor 6 self.assertEqual(ST6.name, "ST6") self.assertEqual(ST6.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST6.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST6.numpy(), self.str_arr) for st in [ST1, ST2, ST3, ST4, ST5, ST6]: # All StringTensors are on cpu place so far. 
@@ -74,25 +74,25 @@ class EagerStringTensorTestCase(unittest.TestCase): name="ST1") # constructor 2 self.assertEqual(ST1.name, "ST1") self.assertEqual(ST1.shape, shape) - self.assertTrue( - np.array_equal(ST1.numpy(), np.empty(shape, dtype=np.unicode_))) + np.testing.assert_array_equal(ST1.numpy(), + np.empty(shape, dtype=np.unicode_)) ST2 = core.eager.StringTensor(self.str_arr, name="ST2") # constructor 3 self.assertEqual(ST2.name, "ST2") self.assertEqual(ST2.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST2.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST2.numpy(), self.str_arr) ST3 = core.eager.StringTensor(ST2, name="ST3") # constructor 6 self.assertEqual(ST3.name, "ST3") self.assertEqual(ST3.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST3.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST3.numpy(), self.str_arr) ST4 = core.eager.StringTensor(value=ST2, name="ST4") # constructor 6 self.assertEqual(ST4.name, "ST4") self.assertEqual(ST4.shape, list(self.str_arr.shape)) - self.assertTrue(np.array_equal(ST4.numpy(), self.str_arr)) + np.testing.assert_array_equal(ST4.numpy(), self.str_arr) for st in [ST1, ST2, ST3, ST4]: # All StringTensors are on cpu place so far. self.assertTrue(st.place._equals(core.CPUPlace())) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py index e2366248391..25a0c0a0652 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py @@ -206,11 +206,15 @@ class TestElementwiseMinOpFP16(unittest.TestCase): z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place, False) z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True) - self.assertTrue(np.array_equal(z_1, z_2), "{} vs {}".format(z_1, z_2)) - self.assertTrue(np.array_equal(x_g_1, x_g_2), - "{} vs {}".format(x_g_1, x_g_2)) - self.assertTrue(np.array_equal(y_g_1, y_g_2), - "{} vs {}".format(y_g_1, y_g_2)) + np.testing.assert_array_equal(z_1, + z_2, + err_msg='{} vs {}'.format(z_1, z_2)) + np.testing.assert_array_equal(x_g_1, + x_g_2, + err_msg='{} vs {}'.format(x_g_1, x_g_2)) + np.testing.assert_array_equal(y_g_1, + y_g_2, + err_msg='{} vs {}'.format(y_g_1, y_g_2)) def test_main(self): self.check_main((13, 17), (13, 17)) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py index 12f2a217360..904b9fe06de 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_pow_op.py @@ -210,9 +210,9 @@ class TestElementwisePowGradOpInt(unittest.TestCase): y.stop_gradient = False res = x**y res.backward() - self.assertTrue(np.array_equal(res.gradient(), self.grad_res)) - self.assertTrue(np.array_equal(x.gradient(), self.grad_x)) - self.assertTrue(np.array_equal(y.gradient(), self.grad_y)) + np.testing.assert_array_equal(res.gradient(), self.grad_res) + np.testing.assert_array_equal(x.gradient(), self.grad_x) + np.testing.assert_array_equal(y.gradient(), self.grad_y) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index 74d101497b8..b42a85fbb91 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ 
b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -36,7 +36,7 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): for p in self.get_places(): grad_value1 = self.run_program(p, stop_gradient=False) grad_value2 = self.run_program(p, stop_gradient=True) - self.assertTrue(np.array_equal(grad_value1, grad_value2)) + np.testing.assert_array_equal(grad_value1, grad_value2) def run_program(self, place, stop_gradient=False): np.random.seed(1) diff --git a/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py b/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py index 9d1c902fdc2..ec727193556 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py +++ b/python/paddle/fluid/tests/unittests/test_executor_check_fetch_list.py @@ -50,7 +50,7 @@ class TestCheckFetchList(unittest.TestCase): fetch_list=[self.fetch_list], # support single list/tuple return_numpy=True) - self.assertTrue(np.array_equal(res[0], self.expected)) + np.testing.assert_array_equal(res[0], self.expected) def test_with_error(self): with self.assertRaises(TypeError): diff --git a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py index 81bc7021280..a78528a73d8 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py +++ b/python/paddle/fluid/tests/unittests/test_executor_return_tensor_not_overwriting.py @@ -65,7 +65,7 @@ class TestExecutorReturnTensorNotOverwritingWithOptest(OpTest): add_out1 = np.array(add_out[0]) mul_out = self.calc_mul_out(place, parallel) add_out2 = np.array(add_out[0]) - self.assertTrue(np.array_equal(add_out1, add_out2)) + np.testing.assert_array_equal(add_out1, add_out2) class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): @@ -108,7 +108,7 @@ class TestExecutorReturnTensorNotOverOverwritingWithLayers(unittest.TestCase): add_out1 = np.array(add_out[0]) sub_out = self.calc_sub_out(place, parallel) add_out2 = np.array(add_out[0]) - self.assertTrue(np.array_equal(add_out1, add_out2)) + np.testing.assert_array_equal(add_out1, add_out2) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_expand_op.py b/python/paddle/fluid/tests/unittests/test_expand_op.py index d0d9a1f7e21..9336c4a405e 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_op.py @@ -272,10 +272,8 @@ class TestExpandDygraphAPI(unittest.TestCase): c = paddle.fluid.layers.expand(a, expand_times=paddle.to_tensor( [2, 3], dtype='int32')) - self.assertTrue( - np.array_equal(b.numpy(), np.tile(a.numpy(), [2, 3]))) - self.assertTrue( - np.array_equal(c.numpy(), np.tile(a.numpy(), [2, 3]))) + np.testing.assert_array_equal(b.numpy(), np.tile(a.numpy(), [2, 3])) + np.testing.assert_array_equal(c.numpy(), np.tile(a.numpy(), [2, 3])) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index 52b9234263d..6fc6fc8f7eb 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -277,11 +277,11 @@ class TestExpandV2DygraphAPI(unittest.TestCase): np_array = np.array([2, 5]) expand_2 = paddle.expand(a, shape=np_array) - self.assertTrue( - np.array_equal(egr_expand_1.numpy(), egr_expand_2.numpy())) - 
self.assertTrue(np.array_equal(expand_1.numpy(), expand_2.numpy())) - self.assertTrue( - np.array_equal(expand_1.numpy(), egr_expand_1.numpy())) + np.testing.assert_array_equal(egr_expand_1.numpy(), + egr_expand_2.numpy()) + np.testing.assert_array_equal(expand_1.numpy(), expand_2.numpy()) + np.testing.assert_array_equal(expand_1.numpy(), + egr_expand_1.numpy()) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py index 72b4d899044..2438b754a12 100644 --- a/python/paddle/fluid/tests/unittests/test_exponential_op.py +++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py @@ -94,7 +94,7 @@ class TestExponentialAPI(unittest.TestCase): self.assertTrue(np.min(y.numpy()) >= 0) y.backward() - self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10]))) + np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10])) paddle.enable_static() def test_fixed_random_number(self): diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index 439296e4d8f..ac69b8e8c6f 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -173,7 +173,7 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): res_1 = run_program(-1) res_2 = run_program(2) - self.assertTrue(np.array_equal(res_1, res_2)) + np.testing.assert_array_equal(res_1, res_2) class TestFCOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index 2a0d29be47d..1641adbb30c 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -17,14 +17,14 @@ from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.layers as layers import op_test -import numpy +import numpy as np import unittest class TestFetchVar(unittest.TestCase): def set_input(self): - self.val = numpy.array([1, 3, 5]).astype(numpy.int32) + self.val = np.array([1, 3, 5]).astype(np.int32) def test_fetch_var(self): self.set_input() @@ -33,15 +33,17 @@ class TestFetchVar(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) fetched_x = fluid.executor._fetch_var("x") - self.assertTrue(numpy.array_equal(fetched_x, self.val), - "fetch_x=%s val=%s" % (fetched_x, self.val)) + np.testing.assert_array_equal(fetched_x, + self.val, + err_msg='fetch_x=%s val=%s' % + (fetched_x, self.val)) self.assertEqual(fetched_x.dtype, self.val.dtype) class TestFetchNullVar(TestFetchVar): def set_input(self): - self.val = numpy.array([]).astype(numpy.int32) + self.val = np.array([]).astype(np.int32) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_op.py index ad7fd26a0a0..305f2dfd3b1 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_any_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_any_op.py @@ -110,7 +110,7 @@ class TestFillAnyInplace(unittest.TestCase): y = 2 * x y.fill_(1) y.backward() - self.assertTrue(np.array_equal(x.grad.numpy(), np.zeros([10, 10]))) + np.testing.assert_array_equal(x.grad.numpy(), np.zeros([10, 10])) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 
bd87181ebcc..945bf11c533 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -127,7 +127,7 @@ class TestFillConstantOpWithSelectedRows(unittest.TestCase): result_array = np.array(out.get_tensor()) full_array = np.full((123, 92), 3.8, 'float32') - self.assertTrue(np.array_equal(result_array, full_array)) + np.testing.assert_array_equal(result_array, full_array) def test_fill_constant_with_selected_rows(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_fill_op.py b/python/paddle/fluid/tests/unittests/test_fill_op.py index fdf4ec85627..29cf097acc0 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_op.py @@ -78,7 +78,7 @@ class TestFillOp3(unittest.TestCase): result_array = np.array(out) full_array = np.array(val, 'float32') - self.assertTrue(np.array_equal(result_array, full_array)) + np.testing.assert_array_equal(result_array, full_array) def test_fill_op(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py index f2f56e42543..b26b5e2f046 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py +++ b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py @@ -97,19 +97,19 @@ class TestFusedMatmulBias(unittest.TestCase): z = fused_matmul_bias(x, y, bias, trans_x, trans_y) z_np = matmul(x_np, y_np, bias_np, trans_x, trans_y) - self.assertTrue(np.array_equal(z.numpy(), z_np)) + np.testing.assert_array_equal(z.numpy(), z_np) z_grad_np = self.rand_data(z_np.shape, dtype) paddle.autograd.backward(z, grad_tensors=[paddle.to_tensor(z_grad_np)]) x_grad_np, y_grad_np, bias_grad_np = matmul_grad( x_np, y_np, bias_np, z_grad_np, trans_x, trans_y) - self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_np)) + np.testing.assert_array_equal(x.grad.numpy(), x_grad_np) self.assertEqual(y_grad_np.shape, y_np.shape) - self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_np)) + np.testing.assert_array_equal(y.grad.numpy(), y_grad_np) if need_bias: - self.assertTrue(np.array_equal(bias.grad.numpy(), bias_grad_np)) + np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np) else: self.assertTrue(bias_grad_np is None) @@ -138,7 +138,7 @@ class TestFusedLinear(unittest.TestCase): linear = FusedLinear(40, 50, transpose_weight=transpose) y1 = linear(x) y2 = fused_linear(x, linear.weight, linear.bias, transpose) - self.assertTrue(np.array_equal(y1.numpy(), y2.numpy())) + np.testing.assert_array_equal(y1.numpy(), y2.numpy()) def test_non_transpose(self): self.check_fused_linear(False) diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index f76094c92eb..6a0fdc4ff61 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -341,7 +341,7 @@ class API_TestDygraphGather(unittest.TestCase): gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0] return gpu_value - self.assertTrue(np.array_equal(test_dygraph(), test_static_graph())) + np.testing.assert_array_equal(test_dygraph(), test_static_graph()) class TestGathertError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py index d59cdc3e328..79194928f9d 100644 
--- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py @@ -324,8 +324,8 @@ class TestAmpScaler(unittest.TestCase): for param in model.parameters(): # param not updated when tensor contains nan or inf - self.assertTrue( - np.array_equal(param.numpy(), params_init[param.name])) + np.testing.assert_array_equal(param.numpy(), + params_init[param.name]) def test_nan_inf(self): self.nan_inf() @@ -974,7 +974,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): fetch_list=fetch_targets) print("pred.numpy()", pred.numpy()) print("result", results[0]) - self.assertTrue(np.array_equal(pred.numpy(), results[0])) + np.testing.assert_array_equal(pred.numpy(), results[0]) paddle.disable_static() def test_inference_save_load(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py index 3b1a0436556..6a256ec1088 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py @@ -323,8 +323,8 @@ class TestAmpScaler(unittest.TestCase): for param in model.parameters(): # param not updated when tensor contains nan or inf - self.assertTrue( - np.array_equal(param.numpy(), params_init[param.name])) + np.testing.assert_array_equal(param.numpy(), + params_init[param.name]) def test_nan_inf(self): self.nan_inf() @@ -965,7 +965,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase): fetch_list=fetch_targets) print("pred.numpy()", pred.numpy()) print("result", results[0]) - self.assertTrue(np.array_equal(pred.numpy(), results[0])) + np.testing.assert_array_equal(pred.numpy(), results[0]) paddle.disable_static() def test_inference_save_load(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py index 7a5934b4fdc..0cd97cbf328 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py @@ -314,8 +314,8 @@ class TestImperativeAutoPrune(unittest.TestCase): learning_rate=0.003, parameter_list=(linear.parameters() + linear2.parameters())) optimizer.minimize(out2) - self.assertTrue( - np.array_equal(linear2_origin, linear2.weight.numpy())) + np.testing.assert_array_equal(linear2_origin, + linear2.weight.numpy()) self.assertFalse( np.array_equal(linear_origin, linear.weight.numpy())) @@ -344,10 +344,9 @@ class TestImperativeAutoPrune(unittest.TestCase): learning_rate=0.003, parameter_list=(linear.parameters() + linear2.parameters())) optimizer.minimize(out2) - self.assertTrue( - np.array_equal(linear2_origin, linear2.weight.numpy())) - self.assertTrue(np.array_equal(linear_origin, - linear.weight.numpy())) + np.testing.assert_array_equal(linear2_origin, + linear2.weight.numpy()) + np.testing.assert_array_equal(linear_origin, linear.weight.numpy()) try: linear2.weight.gradient() except ValueError as e: diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index e67bae46a53..e1663ad400f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -159,8 +159,8 @@ class
TestImperative(unittest.TestCase): out.backward() dy_grad2 = mlp._linear1.weight.gradient() self.assertFalse(fluid.dygraph.enabled()) - self.assertTrue(np.array_equal(dy_out1, dy_out2)) - self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) + np.testing.assert_array_equal(dy_out1, dy_out2) + np.testing.assert_array_equal(dy_grad1, dy_grad2) def test_functional_dygraph_context(self): with _test_eager_guard(): @@ -190,8 +190,8 @@ class TestImperative(unittest.TestCase): dy_grad2 = mlp._linear1.weight.gradient() paddle.enable_static() self.assertFalse(paddle.in_dynamic_mode()) - self.assertTrue(np.array_equal(dy_out1, dy_out2)) - self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) + np.testing.assert_array_equal(dy_out1, dy_out2) + np.testing.assert_array_equal(dy_grad1, dy_grad2) def test_functional_paddle_imperative_dygraph_context(self): with _test_eager_guard(): @@ -229,12 +229,12 @@ class TestImperative(unittest.TestCase): egr_tmp5 = fluid.core.eager.Tensor(value=x) egr_tmp6 = fluid.core.eager.Tensor(t) - self.assertTrue(np.array_equal(x, egr_tmp.numpy())) - self.assertTrue(np.array_equal(y, egr_tmp2.numpy())) - self.assertTrue(np.array_equal(x, egr_tmp3.numpy())) - self.assertTrue(np.array_equal(y, egr_tmp4.numpy())) - self.assertTrue(np.array_equal(x, egr_tmp5.numpy())) - self.assertTrue(np.array_equal(x, egr_tmp6.numpy())) + np.testing.assert_array_equal(x, egr_tmp.numpy()) + np.testing.assert_array_equal(y, egr_tmp2.numpy()) + np.testing.assert_array_equal(x, egr_tmp3.numpy()) + np.testing.assert_array_equal(y, egr_tmp4.numpy()) + np.testing.assert_array_equal(x, egr_tmp5.numpy()) + np.testing.assert_array_equal(x, egr_tmp6.numpy()) else: tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace()) tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace()) @@ -243,12 +243,12 @@ class TestImperative(unittest.TestCase): tmp5 = fluid.core.VarBase(value=x) tmp6 = fluid.core.VarBase(t) - self.assertTrue(np.array_equal(x, tmp.numpy())) - self.assertTrue(np.array_equal(y, tmp2.numpy())) - self.assertTrue(np.array_equal(x, tmp3.numpy())) - self.assertTrue(np.array_equal(y, tmp4.numpy())) - self.assertTrue(np.array_equal(x, tmp5.numpy())) - self.assertTrue(np.array_equal(x, tmp6.numpy())) + np.testing.assert_array_equal(x, tmp.numpy()) + np.testing.assert_array_equal(y, tmp2.numpy()) + np.testing.assert_array_equal(x, tmp3.numpy()) + np.testing.assert_array_equal(y, tmp4.numpy()) + np.testing.assert_array_equal(x, tmp5.numpy()) + np.testing.assert_array_equal(x, tmp6.numpy()) def test_create_varbase(self): with fluid.dygraph.guard(): @@ -479,10 +479,10 @@ class TestImperative(unittest.TestCase): feed={inp.name: np_inp}, fetch_list=[x.name, param_grads[1].name]) - self.assertTrue(np.array_equal(dy_out, static_out)) - self.assertTrue(np.array_equal(dy_grad, static_grad)) - self.assertTrue(np.array_equal(dy_out2, static_out)) - self.assertTrue(np.array_equal(dy_grad2, static_grad)) + np.testing.assert_array_equal(dy_out, static_out) + np.testing.assert_array_equal(dy_grad, static_grad) + np.testing.assert_array_equal(dy_out2, static_out) + np.testing.assert_array_equal(dy_grad2, static_grad) def test_layer_in_out(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -577,9 +577,9 @@ class TestImperative(unittest.TestCase): loss2 = x * z loss1.backward(retain_graph=True) loss2.backward(retain_graph=True) - self.assertTrue(np.array_equal(x.grad.numpy(), [23.])) - self.assertTrue(np.array_equal(y.grad.numpy(), [25.])) - self.assertTrue(np.array_equal(z.grad.numpy(), [5.])) + 
np.testing.assert_array_equal(x.grad.numpy(), [23.0]) + np.testing.assert_array_equal(y.grad.numpy(), [25.0]) + np.testing.assert_array_equal(z.grad.numpy(), [5.0]) x.clear_grad() y.clear_grad() z.clear_grad() @@ -592,13 +592,13 @@ class TestImperative(unittest.TestCase): loss = fun(x, y, z) loss.backward(retain_graph=True) # x.grad = 2*x*y + z + 2*y = 27 - self.assertTrue(np.array_equal(x.grad.numpy(), [27])) + np.testing.assert_array_equal(x.grad.numpy(), [27]) loss.backward(retain_graph=True) - self.assertTrue(np.array_equal(x.grad.numpy(), [54])) + np.testing.assert_array_equal(x.grad.numpy(), [54]) loss.backward() - self.assertTrue(np.array_equal(x.grad.numpy(), [81])) + np.testing.assert_array_equal(x.grad.numpy(), [81]) with self.assertRaises(RuntimeError): loss.backward() @@ -608,8 +608,8 @@ class TestImperative(unittest.TestCase): dx = paddle.grad([loss1], x, create_graph=True)[0] loss = loss1 + loss2 + dx loss.backward() - self.assertTrue(np.array_equal(dx.grad.numpy(), [1])) - self.assertTrue(np.array_equal(x.grad.numpy(), [108])) + np.testing.assert_array_equal(dx.grad.numpy(), [1]) + np.testing.assert_array_equal(x.grad.numpy(), [108]) def test_mlp(sort_sum_gradient): fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient}) @@ -641,7 +641,7 @@ class TestImperative(unittest.TestCase): loss = mlp1(x) loss.backward() - self.assertTrue(np.array_equal(loss.grad.numpy(), [1])) + np.testing.assert_array_equal(loss.grad.numpy(), [1]) self.assertTrue( np.allclose(mlp1._linear1.weight.grad.numpy(), expected_weight1_grad)) @@ -656,7 +656,7 @@ class TestImperative(unittest.TestCase): expected_bias2_grad)) mlp2.clear_gradients() - self.assertTrue(np.array_equal(clear_loss.grad.numpy(), [1])) + np.testing.assert_array_equal(clear_loss.grad.numpy(), [1]) if ((batch_id + 1) % 10) % 2 == 0: mlp1.clear_gradients() expected_weight1_grad = 0. 
@@ -785,14 +785,14 @@ class TestImperative(unittest.TestCase): param_grads[1][1].name, param_grads[2][1].name ]) - self.assertTrue(np.array_equal(dy_out, static_out)) - self.assertTrue(np.array_equal(dy_grad_h2o, static_grad_h2o)) - self.assertTrue(np.array_equal(dy_grad_h2h, static_grad_h2h)) - self.assertTrue(np.array_equal(dy_grad_i2h, static_grad_i2h)) - self.assertTrue(np.array_equal(dy_out2, static_out)) - self.assertTrue(np.array_equal(dy_grad_h2o2, static_grad_h2o)) - self.assertTrue(np.array_equal(dy_grad_h2h2, static_grad_h2h)) - self.assertTrue(np.array_equal(dy_grad_i2h2, static_grad_i2h)) + np.testing.assert_array_equal(dy_out, static_out) + np.testing.assert_array_equal(dy_grad_h2o, static_grad_h2o) + np.testing.assert_array_equal(dy_grad_h2h, static_grad_h2h) + np.testing.assert_array_equal(dy_grad_i2h, static_grad_i2h) + np.testing.assert_array_equal(dy_out2, static_out) + np.testing.assert_array_equal(dy_grad_h2o2, static_grad_h2o) + np.testing.assert_array_equal(dy_grad_h2h2, static_grad_h2h) + np.testing.assert_array_equal(dy_grad_i2h2, static_grad_i2h) def test_rnn(self): with _test_eager_guard(): @@ -846,7 +846,7 @@ class TestDygraphUtils(unittest.TestCase): a = paddle.to_tensor(a_np) res1 = func(a, act="hard_sigmoid") res2 = fluid.layers.hard_sigmoid(a) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_append_activation_in_dygraph1(self): with _test_eager_guard(): @@ -875,7 +875,7 @@ class TestDygraphUtils(unittest.TestCase): a = paddle.to_tensor(a_np) res1 = func(a, act="sigmoid", use_cudnn=True) res2 = fluid.layers.sigmoid(a) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_append_activation_in_dygraph3(self): with _test_eager_guard(): @@ -892,7 +892,7 @@ class TestDygraphUtils(unittest.TestCase): a = paddle.to_tensor(a_np) res1 = func(a) res2 = fluid.layers.relu(a) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_append_activation_in_dygraph_use_mkldnn(self): with _test_eager_guard(): @@ -911,7 +911,7 @@ class TestDygraphUtils(unittest.TestCase): finally: fluid.set_flags({'FLAGS_use_mkldnn': False}) res2 = fluid.layers.relu(a) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_append_activation_in_dygraph_global_use_mkldnn(self): with _test_eager_guard(): @@ -937,7 +937,7 @@ class TestDygraphUtils(unittest.TestCase): a = paddle.to_tensor(a_np) res1 = func(a, bias=a) res2 = paddle.add(a, a) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_append_bias_in_dygraph(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py index 8e9c3c280f4..3e667563e34 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py @@ -58,8 +58,8 @@ class TestDataParallelStateDict(unittest.TestCase): for k, v in single_state.items(): self.assertTrue(k in parallel_state) - self.assertTrue( - np.array_equal(v.numpy(), parallel_state[k].numpy())) + np.testing.assert_array_equal(v.numpy(), + parallel_state[k].numpy()) base_para[k] = v.numpy() @@ -75,7 +75,7 @@ class 
TestDataParallelStateDict(unittest.TestCase): parallel_state = parallel_mlp.state_dict() for k, v in parallel_state.items(): - self.assertTrue(np.array_equal(v.numpy(), base_para[k])) + np.testing.assert_array_equal(v.numpy(), base_para[k]) parallel_mlp.load_dict(base_para) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index d80b708ebf2..5527ab27691 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -186,9 +186,8 @@ class TestEagerGrad(TestCase): out4 = paddle.mean(out3) egr_dout2, egr_dout3 = paddle.grad([out4], [out2, out3]) - self.assertTrue( - np.array_equal(dout2_record_by_hook[0].numpy(), - np.array([1., 2.]))) + np.testing.assert_array_equal(dout2_record_by_hook[0].numpy(), + np.array([1.0, 2.0])) x1 = paddle.to_tensor([1.0, 2.0]) x1.stop_gradient = False @@ -203,8 +202,8 @@ class TestEagerGrad(TestCase): self.assertEqual(dout2.stop_gradient, egr_dout2.stop_gradient) self.assertEqual(dout3.stop_gradient, egr_dout3.stop_gradient) - self.assertTrue(np.array_equal(dout2.numpy(), egr_dout2.numpy())) - self.assertTrue(np.array_equal(dout3.numpy(), egr_dout3.numpy())) + np.testing.assert_array_equal(dout2.numpy(), egr_dout2.numpy()) + np.testing.assert_array_equal(dout3.numpy(), egr_dout3.numpy()) class TestDygraphDoubleGrad(TestCase): @@ -392,15 +391,13 @@ class TestDygraphDoubleGrad(TestCase): if grad_y is not None: self.assertTrue(grad_y.stop_gradient) - self.assertTrue( - np.array_equal(grad_y.numpy(), - original_random_grad_y)) + np.testing.assert_array_equal(grad_y.numpy(), + original_random_grad_y) if grad_z is not None: self.assertTrue(grad_z.stop_gradient) - self.assertTrue( - np.array_equal(grad_z.numpy(), - original_random_grad_z)) + np.testing.assert_array_equal(grad_z.numpy(), + original_random_grad_z) def test_none_one_initial_gradient(self): with _test_eager_guard(): @@ -583,7 +580,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase): grad_2 = a.gradient() - self.assertTrue(np.array_equal(grad_1, grad_2)) + np.testing.assert_array_equal(grad_1, grad_2) def test_compare(self): with _test_eager_guard(): @@ -647,8 +644,8 @@ class TestDoubleGradResNet(TestCase): g_numpy = g.numpy() self.assertEqual(list(g_numpy.shape), list(out.shape)) - self.assertTrue(np.array_equal(egr_out, out)) - self.assertTrue(np.array_equal(egr_g_numpy, g_numpy)) + np.testing.assert_array_equal(egr_out, out) + np.testing.assert_array_equal(egr_g_numpy, g_numpy) @dygraph_guard def test_resnet_resnet101(self): @@ -679,8 +676,8 @@ class TestDoubleGradResNet(TestCase): g_numpy = g.numpy() self.assertEqual(list(g_numpy.shape), list(out.shape)) - self.assertTrue(np.array_equal(egr_out, out)) - self.assertTrue(np.array_equal(egr_g_numpy, g_numpy)) + np.testing.assert_array_equal(egr_out, out) + np.testing.assert_array_equal(egr_g_numpy, g_numpy) class TestDoubleGradBasics(TestCase): @@ -705,22 +702,22 @@ class TestDoubleGradBasics(TestCase): new_x_g.backward() out_ref = np.ones([3, 3]) * 12.0 - self.assertTrue(np.array_equal(out.numpy(), out_ref)) + np.testing.assert_array_equal(out.numpy(), out_ref) new_x_g_ref = np.ones([3, 3]) * 6.0 new_y_g_ref = np.ones([3, 3]) * 6.0 - self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref)) - self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref)) + np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref) + np.testing.assert_array_equal(new_y_g.numpy(), 
new_y_g_ref) x_grad_ref = np.ones([3, 3]) * 0.0 - self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref)) + np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref) y_grad_ref = np.ones([3, 3]) * 3.0 - self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref)) + np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref) grad_out_grad_ref = np.ones([3, 3]) * 6.0 - self.assertTrue( - np.array_equal(grad_out.grad.numpy(), grad_out_grad_ref)) + np.testing.assert_array_equal(grad_out.grad.numpy(), + grad_out_grad_ref) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py b/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py index 87d0d8e81b0..a7e4af4165b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_hook_for_layer.py @@ -95,29 +95,27 @@ class Test_Forward_Hook(unittest.TestCase): forward_pre_hook_handle1 = simplenet.register_forward_pre_hook( forward_pre_hook1) outs_pre_hook = simplenet(input, y) - self.assertTrue( - np.array_equal(outs_pre_hook.numpy(), outs_origin1.numpy())) + np.testing.assert_array_equal(outs_pre_hook.numpy(), + outs_origin1.numpy()) # remove forward_pre_hook forward_pre_hook_handle1.remove() outs_pre_hook = simplenet(input, y) - self.assertTrue( - np.array_equal(outs_pre_hook.numpy(), outs_origin.numpy())) + np.testing.assert_array_equal(outs_pre_hook.numpy(), + outs_origin.numpy()) # register forward_post_hook forward_post_hook_handle1 = simplenet.register_forward_post_hook( forward_post_hook1) outs_forward_hook = simplenet(input, y) - self.assertTrue( - np.array_equal(outs_forward_hook.numpy(), - outs_origin.numpy() * 2)) + np.testing.assert_array_equal(outs_forward_hook.numpy(), + outs_origin.numpy() * 2) # remove forward_post_hook forward_post_hook_handle1.remove() outs_forward_hook = simplenet(input, y) - self.assertTrue( - np.array_equal(outs_forward_hook.numpy(), - outs_origin.numpy())) + np.testing.assert_array_equal(outs_forward_hook.numpy(), + outs_origin.numpy()) # test forward_pre_hook and forward_post_hook that don't have return value def func_forward_hook(self): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py index 36bec7fb030..a1c97089234 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py @@ -218,7 +218,7 @@ class TestDygraphLoadStatic(unittest.TestCase): my_test = MyTest() my_test.set_dict(new_dict, use_structured_name=False) for k, v in my_test.state_dict().items(): - self.assertTrue(np.array_equal(v.numpy(), new_dict[v.name])) + np.testing.assert_array_equal(v.numpy(), new_dict[v.name]) temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index f9306d0cfeb..8015fceff5d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -200,10 +200,9 @@ class TestDygraphSimpleNet(unittest.TestCase): self.assertTrue( np.allclose(static_loss_value, dy_loss_value, rtol=1e-3)) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, dy_param_init[key])) +
np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in six.iteritems(static_param_updated): - self.assertTrue(np.array_equal(value, - dy_param_updated[key])) + np.testing.assert_array_equal(value, dy_param_updated[key]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py index c0287668a31..a93471a09c9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_numpy_bridge.py @@ -13,10 +13,11 @@ # limitations under the License. import unittest +import warnings + import numpy as np import paddle.fluid as fluid -import warnings -from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph +from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard class TestImperativeNumpyBridge(unittest.TestCase): @@ -31,14 +32,14 @@ class TestImperativeNumpyBridge(unittest.TestCase): w[-1].message) # Temporarily disable zero_copy # var = fluid.dygraph.to_variable(data_np, zero_copy=True) - # self.assertTrue(np.array_equal(var.numpy(), data_np)) + # np.testing.assert_array_equal(var.numpy(), data_np) # data_np[0][0] = 4 # self.assertEqual(data_np[0][0], 4) # self.assertEqual(var[0][0].numpy()[0], 4) - # self.assertTrue(np.array_equal(var.numpy(), data_np)) + # np.testing.assert_array_equal(var.numpy(), data_np) var2 = fluid.dygraph.to_variable(data_np, zero_copy=False) - self.assertTrue(np.array_equal(var2.numpy(), data_np)) + np.testing.assert_array_equal(var2.numpy(), data_np) data_np[0][0] = -1 self.assertEqual(data_np[0][0], -1) if not _in_legacy_dygraph(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 064f0948cad..21327255fb6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -573,7 +573,7 @@ class TestDygraphOCRAttention(unittest.TestCase): self.assertTrue(np.allclose(static_out, dy_out)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.array_equal(value, dy_param_init_value[key])) + np.testing.assert_array_equal(value, dy_param_init_value[key]) for key, value in six.iteritems(static_param_value): self.assertTrue(np.allclose(value, dy_param_value[key], rtol=1e-05)) @@ -582,7 +582,7 @@ class TestDygraphOCRAttention(unittest.TestCase): self.assertTrue(np.allclose(static_out, eager_out)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.array_equal(value, eager_param_init_value[key])) + np.testing.assert_array_equal(value, eager_param_init_value[key]) for key, value in six.iteritems(static_param_value): self.assertTrue( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index e5e26111381..34f77b199f2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -376,15 +376,15 @@ class TestDygraphPtbRnn(unittest.TestCase): static_param_updated[static_param_name_list[k - 3]] = out[k] - self.assertTrue(np.array_equal(static_loss_value, dy_loss_value)) - self.assertTrue( - np.array_equal(static_last_cell_value, dy_last_cell_value)) - self.assertTrue( -
np.array_equal(static_last_hidden_value, dy_last_hidden_value)) + np.testing.assert_array_equal(static_loss_value, dy_loss_value) + np.testing.assert_array_equal(static_last_cell_value, + dy_last_cell_value) + np.testing.assert_array_equal(static_last_hidden_value, + dy_last_hidden_value) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, dy_param_init[key])) + np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in six.iteritems(static_param_updated): - self.assertTrue(np.array_equal(value, dy_param_updated[key])) + np.testing.assert_array_equal(value, dy_param_updated[key]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index 06bca877c87..d0b12f33051 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -164,15 +164,15 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): static_param_updated[static_param_name_list[k - 3]] = out[k] - self.assertTrue(np.array_equal(static_loss_value, dy_loss_value)) - self.assertTrue( - np.array_equal(static_last_cell_value, dy_last_cell_value)) - self.assertTrue( - np.array_equal(static_last_hidden_value, dy_last_hidden_value)) + np.testing.assert_array_equal(static_loss_value, dy_loss_value) + np.testing.assert_array_equal(static_last_cell_value, + dy_last_cell_value) + np.testing.assert_array_equal(static_last_hidden_value, + dy_last_hidden_value) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, dy_param_init[key])) + np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in six.iteritems(static_param_updated): - self.assertTrue(np.array_equal(value, dy_param_updated[key])) + np.testing.assert_array_equal(value, dy_param_updated[key]) def test_ptb_rnn_sort_gradient(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py index f59256f25f8..96a8e77f1a7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py @@ -117,10 +117,10 @@ class TestRecurrentFeed(unittest.TestCase): static_dout = out[2] original_np1 = static_out_value - self.assertTrue(np.array_equal(static_sum_out, sum_out_value)) - self.assertTrue(np.array_equal(static_sum_out, eager_sum_out_value)) - self.assertTrue(np.array_equal(static_dout, dyout)) - self.assertTrue(np.array_equal(static_dout, eager_dyout)) + np.testing.assert_array_equal(static_sum_out, sum_out_value) + np.testing.assert_array_equal(static_sum_out, eager_sum_out_value) + np.testing.assert_array_equal(static_dout, dyout) + np.testing.assert_array_equal(static_dout, eager_dyout) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index 593c0462122..7423b6ecfc4 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -387,8 +387,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, 
core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -409,7 +409,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetVariable(self): seed = 90 @@ -492,8 +492,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -514,7 +514,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetNumpy(self): seed = 90 @@ -601,8 +601,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -625,7 +625,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetVariableBeforeTrain(self): seed = 90 @@ -682,17 +682,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name] + 1) if k.find("beta1_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) state_dict = ptb_model.state_dict() @@ -700,7 +698,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testLoadAndSetVarBaseBeforeTrain(self): seed = 90 @@ -769,17 +767,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name] + 1) if k.find("beta1_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) # check parameter @@ -789,7 +785,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + 
np.testing.assert_array_equal(new_t, base_t) def func_testSetNumpyBeforeTrain(self): seed = 90 @@ -870,17 +866,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name] + 1) if k.find("beta1_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) # check parameter @@ -890,7 +884,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testOnlyLoadParams(self): with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py index f0026f8ef33..5c67f5085d2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load_v2.py @@ -401,8 +401,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -423,7 +423,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetVariable(self): seed = 90 @@ -508,8 +508,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -530,7 +530,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetNumpy(self): seed = 90 @@ -619,8 +619,8 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if isinstance(v, (core.VarBase, core.eager.Tensor)): - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name])) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name]) else: self.assertEqual(v, self.base_opti[k]) @@ -643,7 +643,7 @@ class TestDygraphPtbRnn(unittest.TestCase): base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetVariableBeforeTrain(self): seed = 90 @@ -702,17 +702,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name] + 1) if k.find("beta1_pow_acc_0") > 0: - 
self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) state_dict = ptb_model.state_dict() @@ -720,7 +718,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testLoadAndSetVarBaseBeforeTrain(self): seed = 90 @@ -790,17 +788,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "global_step": - self.assertTrue( - np.array_equal(v.numpy(), self.base_opti[v.name] + 1)) + np.testing.assert_array_equal(v.numpy(), + self.base_opti[v.name] + 1) if k.find("beta1_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) # check parameter @@ -810,7 +806,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testSetNumpyBeforeTrain(self): seed = 90 @@ -892,18 +888,15 @@ class TestDygraphPtbRnn(unittest.TestCase): opti_dict = adam.state_dict() for k, v in opti_dict.items(): if k == "LR_Scheduler": - self.assertTrue( - np.array_equal(v['last_epoch'], - self.base_opti[k]['last_epoch'] + 1)) + np.testing.assert_array_equal( + v['last_epoch'], self.base_opti[k]['last_epoch'] + 1) if k.find("beta1_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta1)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta1) if k.find("beta2_pow_acc_0") > 0: - self.assertTrue( - np.array_equal(v.numpy(), - self.base_opti[v.name] * adam._beta2)) + np.testing.assert_array_equal( + v.numpy(), self.base_opti[v.name] * adam._beta2) # check parameter @@ -913,7 +906,7 @@ class TestDygraphPtbRnn(unittest.TestCase): new_t = v.numpy() base_t = self.model_base[k] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def func_testOnlyLoadParams(self): with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index 9f013157205..8268e52127e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -203,13 +203,11 @@ class TestDygraphSimpleNet(unittest.TestCase): static_param_updated[static_param_name_list[ k - 1]] = out[k] - self.assertTrue(np.array_equal(static_loss_value, - dy_loss_value)) + np.testing.assert_array_equal(static_loss_value, dy_loss_value) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, dy_param_init[key])) + np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in 
six.iteritems(static_param_updated): - self.assertTrue(np.array_equal(value, - dy_param_updated[key])) + np.testing.assert_array_equal(value, dy_param_updated[key]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py index d031cd84683..027bb2b9173 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py @@ -313,11 +313,11 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.load_and_train_static() # Phase 3. compare - self.assertTrue(np.array_equal(static_x_data, dy_x_data)) + np.testing.assert_array_equal(static_x_data, dy_x_data) for key, value in six.iteritems(static_param_init_value): key = dict_old_new_init[key] - self.assertTrue(np.array_equal(value, dy_param_init_value[key])) + np.testing.assert_array_equal(value, dy_param_init_value[key]) # np.testing.assert_array_almost_equal(static_out, dy_out) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) @@ -341,10 +341,10 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.load_and_train_static() # Phase 3. compare - self.assertTrue(np.array_equal(static_x_data, dy_x_data)) + np.testing.assert_array_equal(static_x_data, dy_x_data) for key, value in six.iteritems(static_param_init_value): key = dict_old_new_init[key] - self.assertTrue(np.array_equal(value, dy_param_init_value[key])) + np.testing.assert_array_equal(value, dy_param_init_value[key]) # np.testing.assert_array_almost_equal(static_out, dy_out) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) @@ -368,7 +368,7 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): self.load_and_infer_static() # Phase 3. 
compare - self.assertTrue(np.array_equal(static_x_data, dy_x_data)) + np.testing.assert_array_equal(static_x_data, dy_x_data) np.testing.assert_array_almost_equal(static_out, dy_out) self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py index 0c4dad64ada..1f7ac043d05 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py @@ -232,7 +232,7 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): static_param_init_value.keys()) for key, value in six.iteritems(static_param_init_value): key = dict_old_new_init[key] - self.assertTrue(np.array_equal(value, dy_param_init_value[key])) + np.testing.assert_array_equal(value, dy_param_init_value[key]) self.assertTrue(np.allclose(static_out, dy_out)) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py b/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py index 8a7fa967897..db655e4b4e6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py @@ -62,7 +62,7 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): dygraph_out = layer(in_x) dygraph_out_numpy = dygraph_out.numpy() static_out = traced_layer([in_x])[0] - self.assertTrue(np.array_equal(dygraph_out_numpy, static_out)) + np.testing.assert_array_equal(dygraph_out_numpy, static_out) loss = fluid.layers.reduce_mean(dygraph_out) loss.backward() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index 7f60d6c64ac..732de03ff0e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -1133,19 +1133,19 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): static_param_updated[static_param_name_list[k - 4]] = out[k] if _in_legacy_dygraph(): - self.assertTrue( - np.array_equal(static_avg_cost_value, dy_avg_cost_value)) - self.assertTrue( - np.array_equal(static_sum_cost_value, dy_sum_cost_value)) - self.assertTrue( - np.array_equal(static_predict_value, dy_predict_value)) - self.assertTrue( - np.array_equal(static_token_num_value, dy_token_num_value)) + np.testing.assert_array_equal(static_avg_cost_value, + dy_avg_cost_value) + np.testing.assert_array_equal(static_sum_cost_value, + dy_sum_cost_value) + np.testing.assert_array_equal(static_predict_value, + dy_predict_value) + np.testing.assert_array_equal(static_token_num_value, + dy_token_num_value) for key, value in six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, dy_param_init[key])) + np.testing.assert_array_equal(value, dy_param_init[key]) for key, value in six.iteritems(static_param_updated): - self.assertTrue(np.array_equal(value, dy_param_updated[key])) + np.testing.assert_array_equal(value, dy_param_updated[key]) # compare eager result with imperative result with guard(): @@ -1164,7 +1164,7 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): self.assertTrue(np.allclose(dy_token_num_value, eager_token_num_value)) for key, value in 
six.iteritems(static_param_init): - self.assertTrue(np.array_equal(value, eager_param_init[key])) + np.testing.assert_array_equal(value, eager_param_init[key]) for key, value in six.iteritems(dy_param_updated): self.assertTrue(np.allclose(value, eager_param_updated[key])) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py index b814ca87dcd..d3f2009e69d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py @@ -78,37 +78,34 @@ class TestDygraphTripleGradMatmul(TestCase): new_a.backward() out_ref = np.ones([3, 3]) * 12.0 - self.assertTrue(np.array_equal(out.numpy(), out_ref)) + np.testing.assert_array_equal(out.numpy(), out_ref) new_x_g_ref = np.ones([3, 3]) * 6.0 new_y_g_ref = np.ones([3, 3]) * 6.0 - self.assertTrue(np.array_equal(new_x_g.numpy(), new_x_g_ref)) - self.assertTrue(np.array_equal(new_y_g.numpy(), new_y_g_ref)) + np.testing.assert_array_equal(new_x_g.numpy(), new_x_g_ref) + np.testing.assert_array_equal(new_y_g.numpy(), new_y_g_ref) new_a_ref = np.ones([3, 3]) * 3.0 new_b_ref = np.ones([3, 3]) * 3.0 new_c_ref = np.ones([3, 3]) * 12.0 - self.assertTrue(np.array_equal(new_a.numpy(), new_a_ref)) - self.assertTrue(np.array_equal(new_b.numpy(), new_b_ref)) - self.assertTrue(np.array_equal(new_c.numpy(), new_c_ref)) + np.testing.assert_array_equal(new_a.numpy(), new_a_ref) + np.testing.assert_array_equal(new_b.numpy(), new_b_ref) + np.testing.assert_array_equal(new_c.numpy(), new_c_ref) x_grad_ref = np.ones([3, 3]) * 0.0 - self.assertTrue(np.array_equal(x.grad.numpy(), x_grad_ref)) + np.testing.assert_array_equal(x.grad.numpy(), x_grad_ref) y_grad_ref = np.ones([3, 3]) * 0.0 - self.assertTrue(np.array_equal(y.grad.numpy(), y_grad_ref)) + np.testing.assert_array_equal(y.grad.numpy(), y_grad_ref) new_out_g_ref = np.ones([3, 3]) * 3.0 - self.assertTrue( - np.array_equal(new_out_g.grad.numpy(), new_out_g_ref)) + np.testing.assert_array_equal(new_out_g.grad.numpy(), new_out_g_ref) new_x_g_g_ref = np.ones([3, 3]) * 0.0 new_y_g_g_ref = np.ones([3, 3]) * 3.0 - self.assertTrue( - np.array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref)) - self.assertTrue( - np.array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref)) + np.testing.assert_array_equal(new_x_g_g.grad.numpy(), new_x_g_g_ref) + np.testing.assert_array_equal(new_y_g_g.grad.numpy(), new_y_g_g_ref) class TestDygraphTripleGrad(TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py index 84180fa299b..2cc157ae050 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_using_non_zero_gpu.py @@ -25,7 +25,7 @@ class TestImperativeUsingNonZeroGpu(unittest.TestCase): def run_main(self, np_arr, place): with guard(place): var = to_variable(np_arr) - self.assertTrue(np.array_equal(np_arr, var.numpy())) + np.testing.assert_array_equal(np_arr, var.numpy()) def func_non_zero_gpu(self): if not fluid.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index 7138c2393ff..df9236d245b 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -795,8 +795,8 @@ class 
TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase): dynamic_res = run_dynamic_graph() static_res = run_static_graph() - self.assertTrue(np.array_equal(dynamic_res[0], static_res[0])) - self.assertTrue(np.array_equal(dynamic_res[1], static_res[1])) + np.testing.assert_array_equal(dynamic_res[0], static_res[0]) + np.testing.assert_array_equal(dynamic_res[1], static_res[1]) def test_order(self): with framework._test_eager_guard(): @@ -819,7 +819,7 @@ class TestOrthogonalInitializer1(unittest.TestCase): self.num_ops = 9 def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10))) def func_orthogonal(self): @@ -878,7 +878,7 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1): self.num_ops = 8 def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10))) @@ -897,7 +897,7 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1): self.num_ops = 8 def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6)) @@ -922,7 +922,7 @@ class TestOrthogonalInitializer4(unittest.TestCase): self.kernel_size = (3, 3) def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) a = a.reshape(6, -1) self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6))) @@ -973,7 +973,7 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4): self.kernel_size = (3, 3) def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) a = a.reshape(50, -1) self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36))) @@ -993,7 +993,7 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4): self.kernel_size = (3, 3) def check_result(self, a, b): - self.assertTrue(np.array_equal(a, b)) + np.testing.assert_array_equal(a, b) a = a.reshape(36, -1) self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6)) self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6)) @@ -1014,8 +1014,8 @@ class TestDiracInitializer1(unittest.TestCase): self.num_ops = 8 #fill_constant*2, reshape*2, assign_value*2, scatter, cast def check_result(self, w_dygraph, w_static, conv_in, conv_out): - self.assertTrue(np.array_equal(w_dygraph, w_static)) - self.assertTrue(np.array_equal(conv_out, conv_in[:, 0:2, 1:9])) + np.testing.assert_array_equal(w_dygraph, w_static) + np.testing.assert_array_equal(conv_out, conv_in[:, 0:2, 1:9]) def func_dirac(self): self.config() @@ -1079,11 +1079,11 @@ class TestDiracInitializer2(TestDiracInitializer1): self.num_ops = 8 def check_result(self, w_dygraph, w_static, conv_in, conv_out): - self.assertTrue(np.array_equal(w_dygraph, w_static)) - self.assertTrue( - np.array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, 1:9])) - self.assertTrue( - np.array_equal(conv_out[:, 4:8, :, :], np.zeros([8, 4, 8, 8]))) + np.testing.assert_array_equal(w_dygraph, w_static) + np.testing.assert_array_equal(conv_out[:, 0:4, :, :], conv_in[:, :, 1:9, + 1:9]) + np.testing.assert_array_equal(conv_out[:, 4:8, :, :], + np.zeros([8, 4, 8, 8])) # initialize Conv3D weight @@ -1101,13 +1101,11 @@ class TestDiracInitializer3(TestDiracInitializer1): self.num_ops = 7 def check_result(self, 
w_dygraph, w_static, conv_in, conv_out): - self.assertTrue(np.array_equal(w_dygraph, w_static)) - self.assertTrue( - np.array_equal(conv_out[:, 0:5, :, :, :], conv_in[:, :, 1:9, 1:9, - 1:9])) - self.assertTrue( - np.array_equal(conv_out[:, 5:10, :, :, :], conv_in[:, :, 1:9, 1:9, - 1:9])) + np.testing.assert_array_equal(w_dygraph, w_static) + np.testing.assert_array_equal(conv_out[:, 0:5, :, :, :], + conv_in[:, :, 1:9, 1:9, 1:9]) + np.testing.assert_array_equal(conv_out[:, 5:10, :, :, :], + conv_in[:, :, 1:9, 1:9, 1:9]) def test_error(self): self.config() diff --git a/python/paddle/fluid/tests/unittests/test_inplace.py b/python/paddle/fluid/tests/unittests/test_inplace.py index b81fcd90746..94e30a5e8a1 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace.py +++ b/python/paddle/fluid/tests/unittests/test_inplace.py @@ -142,7 +142,7 @@ class TestDygraphInplace(unittest.TestCase): self.assertTrue(id(var) == id(inplace_var)) inplace_var[0] = 2. - self.assertTrue(np.array_equal(var.numpy(), inplace_var.numpy())) + np.testing.assert_array_equal(var.numpy(), inplace_var.numpy()) def test_inplace_api(self): with _test_eager_guard(): @@ -276,7 +276,7 @@ class TestDygraphInplace(unittest.TestCase): loss.backward() grad_var_a = var_a.grad.numpy() - self.assertTrue(np.array_equal(grad_var_a_inplace, grad_var_a)) + np.testing.assert_array_equal(grad_var_a_inplace, grad_var_a) def test_backward_success_2(self): with _test_eager_guard(): @@ -506,7 +506,7 @@ class TestLossIsInplaceVar(unittest.TestCase): loss.backward() grad_var_a = var_a.grad.numpy() - self.assertTrue(np.array_equal(inplace_grad_var_a, grad_var_a)) + np.testing.assert_array_equal(inplace_grad_var_a, grad_var_a) def test_loss_is_inplace_var(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py index 39e493b1b34..73305368d4c 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py @@ -108,7 +108,7 @@ class TestInplaceAddto(unittest.TestCase): res1, w1 = run_program(True) res2, w2 = run_program(False) - self.assertTrue(np.array_equal(res1, res2)) + np.testing.assert_array_equal(res1, res2) def test_nchw(self): self.check_result() diff --git a/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py b/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py index 581ce0d5d02..2fc112870c6 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_auto_generated_apis.py @@ -56,7 +56,7 @@ class TestStaticAutoGeneratedAPI(unittest.TestCase): feed={"x": self.np_x}, fetch_list=[x, out]) - self.assertTrue(np.array_equal(fetch_x, self.np_x)) + np.testing.assert_array_equal(fetch_x, self.np_x) self.assertTrue( self.np_compare(fetch_out, self.executed_numpy_api(self.np_x))) diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index 6aef26ac65b..ab6dea3940e 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -388,9 +388,9 @@ class TestJitSaveLoad(unittest.TestCase): # inference & compare x = fluid.dygraph.to_variable( np.random.random((1, 784)).astype('float32')) - self.assertTrue( - np.array_equal(train_layer(x).numpy(), - infer_layer(x).numpy())) + 
np.testing.assert_array_equal( + train_layer(x).numpy(), + infer_layer(x).numpy()) def load_and_finetune(self, train_layer, load_train_layer): train_layer.train() @@ -398,8 +398,8 @@ class TestJitSaveLoad(unittest.TestCase): # train & compare img0, _, train_loss = train(train_layer) img1, _, load_train_loss = train(load_train_layer) - self.assertTrue( - np.array_equal(train_loss.numpy(), load_train_loss.numpy())) + np.testing.assert_array_equal(train_loss.numpy(), + load_train_loss.numpy()) def load_dygraph_state_dict(self, train_layer): train_layer.eval() @@ -414,9 +414,9 @@ class TestJitSaveLoad(unittest.TestCase): # inference & compare x = fluid.dygraph.to_variable( np.random.random((1, 784)).astype('float32')) - self.assertTrue( - np.array_equal(train_layer(x).numpy(), - new_layer(x).numpy())) + np.testing.assert_array_equal( + train_layer(x).numpy(), + new_layer(x).numpy()) def test_load_dygraph_no_path(self): model_path = os.path.join(self.temp_dir.name, @@ -673,9 +673,9 @@ class TestJitSaveLoadConfig(unittest.TestCase): infer_layer = paddle.jit.load(model_path) x = fluid.dygraph.to_variable( np.random.random((4, 8)).astype('float32')) - self.assertTrue( - np.array_equal(train_layer(x)[0].numpy(), - infer_layer(x).numpy())) + np.testing.assert_array_equal( + train_layer(x)[0].numpy(), + infer_layer(x).numpy()) def test_save_no_support_config_error(self): layer = LinearNet(784, 1) @@ -778,9 +778,9 @@ class TestJitPruneModelAndLoad(unittest.TestCase): x = fluid.dygraph.to_variable( np.random.random((4, 8)).astype('float32')) - self.assertTrue( - np.array_equal(train_layer(x)[0].numpy(), - infer_layer(x).numpy())) + np.testing.assert_array_equal( + train_layer(x)[0].numpy(), + infer_layer(x).numpy()) def test_load_var_not_in_extra_var_info(self): self.train_and_save() @@ -831,10 +831,12 @@ class TestJitSaveMultiCases(unittest.TestCase): else: pred = layer(x).numpy() loaded_pred = loaded_layer(x).numpy() - self.assertTrue( - np.array_equal(pred, loaded_pred), - msg="Result diff when load and inference:\nlayer result:\n{}\n" \ - "loaded layer result:\n{}".format(pred, loaded_pred)) + np.testing.assert_array_equal( + pred, + loaded_pred, + err_msg= + 'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}' + .format(pred, loaded_pred)) def test_no_prune_to_static_after_train(self): layer = LinearNet(784, 1) @@ -1056,7 +1058,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase): paddle.jit.save(layer, self.model_path) load_layer = paddle.jit.load(self.model_path) load_out = load_layer(x) - self.assertTrue(np.array_equal(out, load_out)) + np.testing.assert_array_equal(out, load_out) class TestJitSaveLoadNoParamLayer(unittest.TestCase): @@ -1079,7 +1081,7 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase): paddle.jit.save(layer, self.model_path) load_layer = paddle.jit.load(self.model_path) load_out = load_layer(x, y) - self.assertTrue(np.array_equal(out, load_out)) + np.testing.assert_array_equal(out, load_out) class TestJitSaveLoadMultiMethods(unittest.TestCase): @@ -1506,7 +1508,7 @@ class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase): load_func = paddle.jit.load(path) load_result = load_func(inps) - self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy())) + np.testing.assert_array_equal(load_result.numpy(), origin.numpy()) class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): @@ -1546,8 +1548,8 @@ class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase): load_result = load_func(inps) - self.assertTrue( - 
np.array_equal(origin_result.numpy(), load_result.numpy())) + np.testing.assert_array_equal(origin_result.numpy(), + load_result.numpy()) class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): @@ -1586,7 +1588,7 @@ class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase): load_func = paddle.jit.load(path) load_result = load_func(inps) - self.assertTrue(np.array_equal(load_result.numpy(), origin.numpy())) + np.testing.assert_array_equal(load_result.numpy(), origin.numpy()) class TestJitSaveLoadDataParallel(unittest.TestCase): @@ -1605,10 +1607,12 @@ class TestJitSaveLoadDataParallel(unittest.TestCase): x = paddle.to_tensor(np.random.random((1, 784)).astype('float32')) pred = layer(x).numpy() loaded_pred = loaded_layer(x).numpy() - self.assertTrue( - np.array_equal(pred, loaded_pred), - msg="Result diff when load and inference:\nlayer result:\n{}\n" \ - "loaded layer result:\n{}".format(pred, loaded_pred)) + np.testing.assert_array_equal( + pred, + loaded_pred, + err_msg= + 'Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}' + .format(pred, loaded_pred)) def test_jit_save_data_parallel_with_inputspec(self): layer = LinearNetNotDeclarative(784, 1) diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index 6ae2dbfb590..54f84a1bb9b 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -237,8 +237,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): if multi_precision: params[0] = np.array(params[0]) params[1] = np.array(params[1]) - self.assertTrue( - np.array_equal(params[0], params[1].astype(np.float16))) + np.testing.assert_array_equal(params[0], + params[1].astype(np.float16)) return params[0].astype(np.float32) else: self.assertTrue(params[0] is not None) @@ -259,9 +259,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): fetch_list=[weight, bias]) weight_np = weight_np.astype('float32') bias_np = bias_np.astype('float32') - self.assertTrue(np.array_equal(weight_np, - get_parameter(weight))) - self.assertTrue(np.array_equal(bias_np, get_parameter(bias))) + np.testing.assert_array_equal(weight_np, get_parameter(weight)) + np.testing.assert_array_equal(bias_np, get_parameter(bias)) return weight_np, bias_np @switch_to_static_graph diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 2ee1a1ba76f..1bae5b75210 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -381,7 +381,7 @@ class TestFP16ScaleBiasLayerNorm(unittest.TestCase): x_np, weight_np, bias_np, 'float32') def assert_equal(x, y): - self.assertTrue(np.array_equal(x, y)) + np.testing.assert_array_equal(x, y) assert_equal(y_np_1, y_np_2) assert_equal(x_g_np_1, x_g_np_2) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 551ba3ffb54..20bc86646f7 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -109,16 +109,16 @@ class TestLayer(LayerTest): x = base.to_variable(inp) custom = CustomLayer(input_size=3, linear1_size=2) ret = custom(x, do_linear2=False) - self.assertTrue(np.array_equal(ret.numpy().shape, [3, 2])) + np.testing.assert_array_equal(ret.numpy().shape, [3, 2]) ret = custom(x, do_linear2=True) - 
self.assertTrue(np.array_equal(ret.numpy().shape, [3, 1])) + np.testing.assert_array_equal(ret.numpy().shape, [3, 1]) inp = np.ones([3, 3], dtype='float32') x = base.to_variable(inp) custom = CustomLayer(input_size=3, linear1_size=2) ret = custom(x, do_linear2=False) - self.assertTrue(np.array_equal(ret.numpy().shape, [3, 2])) + np.testing.assert_array_equal(ret.numpy().shape, [3, 2]) ret = custom(x, do_linear2=True) - self.assertTrue(np.array_equal(ret.numpy().shape, [3, 1])) + np.testing.assert_array_equal(ret.numpy().shape, [3, 1]) def test_dropout(self): inp = np.ones([3, 32, 32], dtype='float32') @@ -157,12 +157,12 @@ class TestLayer(LayerTest): dy_ret_value = dy_ret.numpy() dy_ret2_value = dy_ret2.numpy() - self.assertTrue(np.array_equal(dy_eager_ret_value, dy_eager_ret2_value)) - self.assertTrue(np.array_equal(static_ret, dy_eager_ret_value)) + np.testing.assert_array_equal(dy_eager_ret_value, dy_eager_ret2_value) + np.testing.assert_array_equal(static_ret, dy_eager_ret_value) - self.assertTrue(np.array_equal(static_ret, static_ret2)) - self.assertTrue(np.array_equal(dy_ret_value, dy_ret2_value)) - self.assertTrue(np.array_equal(static_ret, dy_ret_value)) + np.testing.assert_array_equal(static_ret, static_ret2) + np.testing.assert_array_equal(dy_ret_value, dy_ret2_value) + np.testing.assert_array_equal(static_ret, dy_ret_value) def test_linear(self): inp = np.ones([3, 32, 32], dtype='float32') @@ -192,8 +192,8 @@ class TestLayer(LayerTest): dy_ret = linear(t) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.array_equal(static_ret, dy_eager_ret_value)) - self.assertTrue(np.array_equal(static_ret, dy_ret_value)) + np.testing.assert_array_equal(static_ret, dy_eager_ret_value) + np.testing.assert_array_equal(static_ret, dy_ret_value) with self.static_graph(): @@ -243,8 +243,8 @@ class TestLayer(LayerTest): dy_ret = flatten(t) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.array_equal(static_ret, dy_eager_ret_value)) - self.assertTrue(np.array_equal(static_ret, dy_ret_value)) + np.testing.assert_array_equal(static_ret, dy_eager_ret_value) + np.testing.assert_array_equal(static_ret, dy_ret_value) with self.static_graph(): @@ -338,9 +338,9 @@ class TestLayer(LayerTest): self.assertFalse(hasattr(lm, "_scale_w")) self.assertFalse(hasattr(lm, "_bias_w")) - self.assertTrue(np.array_equal(static_ret, static_ret2)) - self.assertTrue(np.array_equal(dy_eager_ret_value, static_ret2)) - self.assertTrue(np.array_equal(dy_ret_value, static_ret2)) + np.testing.assert_array_equal(static_ret, static_ret2) + np.testing.assert_array_equal(dy_eager_ret_value, static_ret2) + np.testing.assert_array_equal(dy_ret_value, static_ret2) with self.dynamic_graph(): with _test_eager_guard(): @@ -379,8 +379,8 @@ class TestLayer(LayerTest): my_syncbn = paddle.nn.SyncBatchNorm(3) dy_ret = my_syncbn(base.to_variable(t)) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.array_equal(static_ret, dy_ret_value)) - self.assertTrue(np.array_equal(static_ret, dy_eager_ret_value)) + np.testing.assert_array_equal(static_ret, dy_ret_value) + np.testing.assert_array_equal(static_ret, dy_eager_ret_value) def test_relu(self): with self.static_graph(): @@ -536,21 +536,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) conv2d2.weight.set_value(conv2d1_weight_np) - self.assertTrue( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.testing.assert_array_equal(conv2d1_weight_np, + conv2d2.weight.numpy()) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = 
conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), - dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - self.assertTrue( - np.array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy())) + np.testing.assert_array_equal(conv2d1.weight.numpy(), + conv2d2.weight.numpy()) + np.testing.assert_array_equal(conv2d1.bias.numpy(), + conv2d2.bias.numpy()) images = np.ones([2, 3, 5, 5], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") @@ -572,19 +570,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) conv2d2.weight.set_value(conv2d1_weight_np) - self.assertTrue( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.testing.assert_array_equal(conv2d1_weight_np, + conv2d2.weight.numpy()) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - self.assertTrue( - np.array_equal(conv2d1.weight.numpy(), conv2d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy())) + np.testing.assert_array_equal(conv2d1.weight.numpy(), + conv2d2.weight.numpy()) + np.testing.assert_array_equal(conv2d1.bias.numpy(), + conv2d2.bias.numpy()) def test_gru_unit(self): lod = [[2, 4, 3]] @@ -666,14 +664,14 @@ class TestLayer(LayerTest): dy_ret2 = gru2(base.to_variable(input), base.to_variable(hidden_input)) for o1, o2 in zip(dy_ret1, dy_ret2): - self.assertTrue(np.array_equal(o1.numpy(), o2.numpy())) + np.testing.assert_array_equal(o1.numpy(), o2.numpy()) gru2.weight = gru1.weight gru2.bias = gru1.bias - self.assertTrue( - np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())) - self.assertTrue( - np.array_equal(gru1.bias.numpy(), gru2.bias.numpy())) + np.testing.assert_array_equal(gru1.weight.numpy(), + gru2.weight.numpy()) + np.testing.assert_array_equal(gru1.bias.numpy(), + gru2.bias.numpy()) custom_weight = np.random.randn(D, D * 3).astype("float32") weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
@@ -695,14 +693,13 @@ class TestLayer(LayerTest): dy_ret2 = gru2(base.to_variable(input), base.to_variable(hidden_input)) for o1, o2 in zip(dy_ret1, dy_ret2): - self.assertTrue(np.array_equal(o1.numpy(), o2.numpy())) + np.testing.assert_array_equal(o1.numpy(), o2.numpy()) gru2.weight = gru1.weight gru2.bias = gru1.bias - self.assertTrue( - np.array_equal(gru1.weight.numpy(), gru2.weight.numpy())) - self.assertTrue(np.array_equal(gru1.bias.numpy(), - gru2.bias.numpy())) + np.testing.assert_array_equal(gru1.weight.numpy(), + gru2.weight.numpy()) + np.testing.assert_array_equal(gru1.bias.numpy(), gru2.bias.numpy()) def test_elementwise_math(self): n = np.ones([3, 3], dtype='float32') @@ -816,8 +813,8 @@ class TestLayer(LayerTest): }, fetch_list=[out], with_lod=True)[0] - self.assertTrue( - np.array_equal(np.array(static_rlt), np.array(static_rlt2))) + np.testing.assert_array_equal(np.array(static_rlt), + np.array(static_rlt2)) def test_conv2d_transpose(self): inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32') @@ -889,21 +886,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) conv2d2.weight.set_value(conv2d1_weight_np) - self.assertTrue( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.testing.assert_array_equal(conv2d1_weight_np, + conv2d2.weight.numpy()) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), - dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - self.assertTrue( - np.array_equal(conv2d1.weight.numpy(), - conv2d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy())) + np.testing.assert_array_equal(conv2d1.weight.numpy(), + conv2d2.weight.numpy()) + np.testing.assert_array_equal(conv2d1.bias.numpy(), + conv2d2.bias.numpy()) images = np.ones([2, 3, 5, 5], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") @@ -925,19 +920,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) conv2d2.weight.set_value(conv2d1_weight_np) - self.assertTrue( - np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy())) + np.testing.assert_array_equal(conv2d1_weight_np, + conv2d2.weight.numpy()) conv2d2.bias.set_value(conv2d1_bias) dy_ret1 = conv2d1(base.to_variable(images)) dy_ret2 = conv2d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv2d2.weight = conv2d1.weight conv2d2.bias = conv2d1.bias - self.assertTrue( - np.array_equal(conv2d1.weight.numpy(), conv2d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy())) + np.testing.assert_array_equal(conv2d1.weight.numpy(), + conv2d2.weight.numpy()) + np.testing.assert_array_equal(conv2d1.bias.numpy(), + conv2d2.bias.numpy()) with self.static_graph(): @@ -1064,11 +1059,11 @@ class TestLayer(LayerTest): }, fetch_list=[out2])[0] - self.assertTrue(np.array_equal(dy_rlt2_value, static_rlt3)) - self.assertTrue(np.array_equal(dy_eager_rlt2_value, static_rlt3)) - self.assertTrue(np.array_equal(static_rlt2, static_rlt)) - self.assertTrue(np.array_equal(dy_rlt_value, static_rlt)) - self.assertTrue(np.array_equal(dy_eager_rlt_value, static_rlt)) + 
np.testing.assert_array_equal(dy_rlt2_value, static_rlt3) + np.testing.assert_array_equal(dy_eager_rlt2_value, static_rlt3) + np.testing.assert_array_equal(static_rlt2, static_rlt) + np.testing.assert_array_equal(dy_rlt_value, static_rlt) + np.testing.assert_array_equal(dy_eager_rlt_value, static_rlt) with self.dynamic_graph(): with _test_eager_guard(): @@ -1094,15 +1089,14 @@ class TestLayer(LayerTest): base.to_variable(inp_np_y)) dy_rlt2 = btp2(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) - self.assertTrue(np.array_equal(dy_rlt1.numpy(), - dy_rlt2.numpy())) + np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) btp2.weight = btp1.weight btp2.bias = btp1.bias - self.assertTrue( - np.array_equal(btp1.weight.numpy(), btp2.weight.numpy())) - self.assertTrue( - np.array_equal(btp1.bias.numpy(), btp2.bias.numpy())) + np.testing.assert_array_equal(btp1.weight.numpy(), + btp2.weight.numpy()) + np.testing.assert_array_equal(btp1.bias.numpy(), + btp2.bias.numpy()) custom_weight = np.random.randn(6, 3, 3).astype("float32") weight_attr = fluid.ParamAttr(initializer=fluid.initializer. @@ -1124,14 +1118,13 @@ class TestLayer(LayerTest): base.to_variable(inp_np_y)) dy_rlt2 = btp2(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) - self.assertTrue(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) + np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) btp2.weight = btp1.weight btp2.bias = btp1.bias - self.assertTrue( - np.array_equal(btp1.weight.numpy(), btp2.weight.numpy())) - self.assertTrue(np.array_equal(btp1.bias.numpy(), - btp2.bias.numpy())) + np.testing.assert_array_equal(btp1.weight.numpy(), + btp2.weight.numpy()) + np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy()) def prelu_test(self, mode): inp_np = np.ones([5, 200, 100, 100]).astype('float32') @@ -1204,13 +1197,11 @@ class TestLayer(LayerTest): prelu2.weight.set_value(prelu1.weight.numpy()) dy_rlt1 = prelu1(inp) dy_rlt2 = prelu2(inp) - self.assertTrue(np.array_equal(dy_rlt1.numpy(), - dy_rlt2.numpy())) + np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) prelu2.weight = prelu1.weight - self.assertTrue( - np.array_equal(prelu1.weight.numpy(), - prelu2.weight.numpy())) + np.testing.assert_array_equal(prelu1.weight.numpy(), + prelu2.weight.numpy()) inp_np = np.random.randn(5, 200, 100, 100).astype("float32") inp = base.to_variable(inp_np) @@ -1230,11 +1221,11 @@ class TestLayer(LayerTest): prelu2.weight.set_value(prelu1.weight.numpy()) dy_rlt1 = prelu1(inp) dy_rlt2 = prelu2(inp) - self.assertTrue(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) + np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) prelu2.weight = prelu1.weight - self.assertTrue( - np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())) + np.testing.assert_array_equal(prelu1.weight.numpy(), + prelu2.weight.numpy()) def test_prelu(self): self.prelu_test("channel") @@ -1292,16 +1283,16 @@ class TestLayer(LayerTest): rep2 = emb2(base.to_variable(inp_word)) self.assertFalse( np.array_equal(emb1.weight.numpy(), custom_weight)) - self.assertTrue( - np.array_equal(emb2.weight.numpy(), custom_weight)) + np.testing.assert_array_equal(emb2.weight.numpy(), + custom_weight) self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy())) emb2.weight.set_value(emb1.weight.numpy()) rep2 = emb2(base.to_variable(inp_word)) - self.assertTrue(np.array_equal(rep1.numpy(), rep2.numpy())) + np.testing.assert_array_equal(rep1.numpy(), rep2.numpy()) emb2.weight = emb1.weight - self.assertTrue( - 
np.array_equal(emb1.weight.numpy(), emb2.weight.numpy())) + np.testing.assert_array_equal(emb1.weight.numpy(), + emb2.weight.numpy()) custom_weight = np.random.randn(dict_size, 32).astype("float32") weight_attr = fluid.ParamAttr(initializer=fluid.initializer. @@ -1313,15 +1304,15 @@ class TestLayer(LayerTest): rep1 = emb1(base.to_variable(inp_word)) rep2 = emb2(base.to_variable(inp_word)) self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight)) - self.assertTrue(np.array_equal(emb2.weight.numpy(), custom_weight)) + np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight) self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy())) emb2.weight.set_value(emb1.weight.numpy()) rep2 = emb2(base.to_variable(inp_word)) - self.assertTrue(np.array_equal(rep1.numpy(), rep2.numpy())) + np.testing.assert_array_equal(rep1.numpy(), rep2.numpy()) emb2.weight = emb1.weight - self.assertTrue( - np.array_equal(emb1.weight.numpy(), emb2.weight.numpy())) + np.testing.assert_array_equal(emb1.weight.numpy(), + emb2.weight.numpy()) def test_nce(self): window_size = 5 @@ -1543,15 +1534,15 @@ class TestLayer(LayerTest): nce2.bias.set_value(nce1.bias) nce1_loss = nce1(embs3, wl) nce2_loss = nce2(embs3, wl) - self.assertTrue( - np.array_equal(nce1_loss.numpy(), nce2_loss.numpy())) + np.testing.assert_array_equal(nce1_loss.numpy(), + nce2_loss.numpy()) nce2.weight = nce1.weight nce2.bias = nce1.bias - self.assertTrue( - np.array_equal(nce1.weight.numpy(), nce2.weight.numpy())) - self.assertTrue( - np.array_equal(nce1.bias.numpy(), nce2.bias.numpy())) + np.testing.assert_array_equal(nce1.weight.numpy(), + nce2.weight.numpy()) + np.testing.assert_array_equal(nce1.bias.numpy(), + nce2.bias.numpy()) custom_weight = np.random.randn(dict_size, 128).astype("float32") weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
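[Note on the mechanical rewrite applied throughout these hunks: the two assertion styles are equivalent for passing tests but fail very differently. A minimal, self-contained sketch — plain unittest plus NumPy, independent of the Paddle suite — of the difference in failure output:

    import unittest
    import numpy as np

    class Demo(unittest.TestCase):

        def test_old_style(self):
            # Fails with only "False is not true"; the arrays themselves
            # are never shown, so the mismatch must be reproduced by hand.
            self.assertTrue(np.array_equal(np.array([1.0, 2.0]),
                                           np.array([1.0, 2.5])))

        def test_new_style(self):
            # Fails with a full report: shapes, the count of mismatched
            # elements, and both array values.
            np.testing.assert_array_equal(np.array([1.0, 2.0]),
                                          np.array([1.0, 2.5]))

    if __name__ == '__main__':
        unittest.main()
]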
@@ -1605,15 +1596,13 @@ class TestLayer(LayerTest): nce2.bias.set_value(nce1.bias) nce1_loss = nce1(embs3, wl) nce2_loss = nce2(embs3, wl) - self.assertTrue(np.array_equal(nce1_loss.numpy(), - nce2_loss.numpy())) + np.testing.assert_array_equal(nce1_loss.numpy(), nce2_loss.numpy()) nce2.weight = nce1.weight nce2.bias = nce1.bias - self.assertTrue( - np.array_equal(nce1.weight.numpy(), nce2.weight.numpy())) - self.assertTrue(np.array_equal(nce1.bias.numpy(), - nce2.bias.numpy())) + np.testing.assert_array_equal(nce1.weight.numpy(), + nce2.weight.numpy()) + np.testing.assert_array_equal(nce1.bias.numpy(), nce2.bias.numpy()) def test_one_hot(self): with self.dynamic_graph(): @@ -1623,16 +1612,15 @@ class TestLayer(LayerTest): one_hot_label1 = fluid.layers.one_hot(input=label, depth=4) one_hot_label2 = fluid.layers.one_hot( input=label, depth=fluid.dygraph.to_variable(np.array([4]))) - self.assertTrue( - np.array_equal(one_hot_label1.numpy(), - one_hot_label2.numpy())) + np.testing.assert_array_equal(one_hot_label1.numpy(), + one_hot_label2.numpy()) label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]])) one_hot_label1 = fluid.layers.one_hot(input=label, depth=4) one_hot_label2 = fluid.layers.one_hot( input=label, depth=fluid.dygraph.to_variable(np.array([4]))) - self.assertTrue( - np.array_equal(one_hot_label1.numpy(), one_hot_label2.numpy())) + np.testing.assert_array_equal(one_hot_label1.numpy(), + one_hot_label2.numpy()) def test_split(self): with self.dynamic_graph(): @@ -1643,8 +1631,8 @@ class TestLayer(LayerTest): num_or_sections=2, dim=fluid.dygraph.to_variable( np.array([1]))) - self.assertTrue(np.array_equal(x0.numpy(), x00.numpy())) - self.assertTrue(np.array_equal(x1.numpy(), x11.numpy())) + np.testing.assert_array_equal(x0.numpy(), x00.numpy()) + np.testing.assert_array_equal(x1.numpy(), x11.numpy()) input = fluid.dygraph.to_variable(np.random.random((3, 8, 5))) x0, x1 = fluid.layers.split(input, num_or_sections=2, dim=1) @@ -1652,8 +1640,8 @@ class TestLayer(LayerTest): num_or_sections=2, dim=fluid.dygraph.to_variable( np.array([1]))) - self.assertTrue(np.array_equal(x0.numpy(), x00.numpy())) - self.assertTrue(np.array_equal(x1.numpy(), x11.numpy())) + np.testing.assert_array_equal(x0.numpy(), x00.numpy()) + np.testing.assert_array_equal(x1.numpy(), x11.numpy()) def test_topk(self): with self.dynamic_graph(): @@ -1662,20 +1650,19 @@ class TestLayer(LayerTest): top5_values1, top5_indices1 = layers.topk(input, k=5) top5_values2, top5_indices2 = layers.topk( input, k=fluid.dygraph.to_variable(np.array([5]))) - self.assertTrue( - np.array_equal(top5_values1.numpy(), top5_values2.numpy())) - self.assertTrue( - np.array_equal(top5_indices1.numpy(), - top5_indices2.numpy())) + np.testing.assert_array_equal(top5_values1.numpy(), + top5_values2.numpy()) + np.testing.assert_array_equal(top5_indices1.numpy(), + top5_indices2.numpy()) input = fluid.dygraph.to_variable(np.random.random((13, 11))) top5_values1, top5_indices1 = layers.topk(input, k=5) top5_values2, top5_indices2 = layers.topk( input, k=fluid.dygraph.to_variable(np.array([5]))) - self.assertTrue( - np.array_equal(top5_values1.numpy(), top5_values2.numpy())) - self.assertTrue( - np.array_equal(top5_indices1.numpy(), top5_indices2.numpy())) + np.testing.assert_array_equal(top5_values1.numpy(), + top5_values2.numpy()) + np.testing.assert_array_equal(top5_indices1.numpy(), + top5_indices2.numpy()) def test_conv3d(self): with self.static_graph(): @@ -1737,21 +1724,19 @@ class TestLayer(LayerTest): self.assertFalse( 
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) conv3d2.weight.set_value(conv3d1_weight_np) - self.assertTrue( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.testing.assert_array_equal(conv3d1_weight_np, + conv3d2.weight.numpy()) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), - dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - self.assertTrue( - np.array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy())) + np.testing.assert_array_equal(conv3d1.weight.numpy(), + conv3d2.weight.numpy()) + np.testing.assert_array_equal(conv3d1.bias.numpy(), + conv3d2.bias.numpy()) images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") @@ -1771,19 +1756,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) conv3d2.weight.set_value(conv3d1_weight_np) - self.assertTrue( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.testing.assert_array_equal(conv3d1_weight_np, + conv3d2.weight.numpy()) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - self.assertTrue( - np.array_equal(conv3d1.weight.numpy(), conv3d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy())) + np.testing.assert_array_equal(conv3d1.weight.numpy(), + conv3d2.weight.numpy()) + np.testing.assert_array_equal(conv3d1.bias.numpy(), + conv3d2.bias.numpy()) def test_row_conv(self): input = np.arange(15).reshape([3, 5]).astype('float32') @@ -2142,17 +2127,14 @@ class TestLayer(LayerTest): base.to_variable(adj)) dy_ret2 = treeConv2(base.to_variable(vectors), base.to_variable(adj)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), - dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) treeConv2.weight = treeConv1.weight treeConv2.bias = treeConv1.bias - self.assertTrue( - np.array_equal(treeConv1.weight.numpy(), - treeConv2.weight.numpy())) - self.assertTrue( - np.array_equal(treeConv1.bias.numpy(), - treeConv2.bias.numpy())) + np.testing.assert_array_equal(treeConv1.weight.numpy(), + treeConv2.weight.numpy()) + np.testing.assert_array_equal(treeConv1.bias.numpy(), + treeConv2.bias.numpy()) custom_weight = np.random.randn(5, 3, 6, 1).astype("float32") weight_attr = fluid.ParamAttr(initializer=fluid.initializer. 
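[The assertFalse(np.array_equal(...)) checks in these hunks are left as-is, presumably because numpy.testing offers no not-equal counterpart to assert_array_equal. If a reporting-style negative assertion were ever wanted, one option — an assumption for illustration, not something this patch does — is to assert that the equality check raises:

    import unittest
    import numpy as np

    class NegativeDemo(unittest.TestCase):

        def test_arrays_differ(self):
            a = np.array([1, 2, 3])
            b = np.array([1, 2, 4])
            # assert_array_equal raises AssertionError on mismatch, so a
            # "must differ" check can be phrased as assertRaises.
            self.assertRaises(AssertionError,
                              np.testing.assert_array_equal, a, b)
]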
@@ -2179,15 +2161,14 @@ class TestLayer(LayerTest): base.to_variable(adj)) dy_ret2 = treeConv2(base.to_variable(vectors), base.to_variable(adj)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) treeConv2.weight = treeConv1.weight treeConv2.bias = treeConv1.bias - self.assertTrue( - np.array_equal(treeConv1.weight.numpy(), - treeConv2.weight.numpy())) - self.assertTrue( - np.array_equal(treeConv1.bias.numpy(), treeConv2.bias.numpy())) + np.testing.assert_array_equal(treeConv1.weight.numpy(), + treeConv2.weight.numpy()) + np.testing.assert_array_equal(treeConv1.bias.numpy(), + treeConv2.bias.numpy()) def test_conv3d_transpose(self): input_array = np.arange(0, 48).reshape([2, 3, 2, 2, @@ -2257,21 +2238,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) conv3d2.weight.set_value(conv3d1_weight_np) - self.assertTrue( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.testing.assert_array_equal(conv3d1_weight_np, + conv3d2.weight.numpy()) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), - dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - self.assertTrue( - np.array_equal(conv3d1.weight.numpy(), - conv3d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy())) + np.testing.assert_array_equal(conv3d1.weight.numpy(), + conv3d2.weight.numpy()) + np.testing.assert_array_equal(conv3d1.bias.numpy(), + conv3d2.bias.numpy()) images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") @@ -2297,19 +2276,19 @@ class TestLayer(LayerTest): self.assertFalse( np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) conv3d2.weight.set_value(conv3d1_weight_np) - self.assertTrue( - np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy())) + np.testing.assert_array_equal(conv3d1_weight_np, + conv3d2.weight.numpy()) conv3d1.bias.set_value(conv3d1_bias) dy_ret1 = conv3d1(base.to_variable(images)) dy_ret2 = conv3d2(base.to_variable(images)) - self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) + np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight conv3d2.bias = conv3d1.bias - self.assertTrue( - np.array_equal(conv3d1.weight.numpy(), conv3d2.weight.numpy())) - self.assertTrue( - np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy())) + np.testing.assert_array_equal(conv3d1.weight.numpy(), + conv3d2.weight.numpy()) + np.testing.assert_array_equal(conv3d1.bias.numpy(), + conv3d2.bias.numpy()) def test_eye_op(self): np_eye = np.eye(3, 2) @@ -2398,7 +2377,7 @@ class TestLayer(LayerTest): layers.while_loop(cond1, body2, [j]) - self.assertTrue(np.array_equal(static_ret[0], dy_ret[0].numpy())) + np.testing.assert_array_equal(static_ret[0], dy_ret[0].numpy()) def test_while_loop(self): with _test_eager_guard(): @@ -2598,8 +2577,8 @@ class TestLayer(LayerTest): lambda: less_than_branch(a, b)) eager_dynamic_res = out.numpy() eager_dynamic_res2 = out2.numpy() - self.assertTrue( - np.array_equal(eager_dynamic_res, eager_dynamic_res2)) + np.testing.assert_array_equal(eager_dynamic_res, + eager_dynamic_res2) with self.assertRaises(TypeError): layers.cond(a < b, 'str', 'str') with 
self.assertRaises(TypeError): @@ -2613,14 +2592,14 @@ class TestLayer(LayerTest): lambda: less_than_branch(a, b)) dynamic_res = out.numpy() dynamic_res2 = out2.numpy() - self.assertTrue(np.array_equal(dynamic_res, dynamic_res2)) + np.testing.assert_array_equal(dynamic_res, dynamic_res2) with self.assertRaises(TypeError): layers.cond(a < b, 'str', 'str') with self.assertRaises(TypeError): layers.cond(a >= b, 'str', 'str') - self.assertTrue(np.array_equal(static_res, dynamic_res)) - self.assertTrue(np.array_equal(static_res, eager_dynamic_res)) + np.testing.assert_array_equal(static_res, dynamic_res) + np.testing.assert_array_equal(static_res, eager_dynamic_res) def test_case(self): @@ -2683,10 +2662,10 @@ class TestLayer(LayerTest): dynamic_res1 = out_1.numpy() dynamic_res2 = out_2.numpy() - self.assertTrue(np.array_equal(static_res1, dynamic_res1)) - self.assertTrue(np.array_equal(static_res2, dynamic_res2)) - self.assertTrue(np.array_equal(static_res1, eager_dynamic_res1)) - self.assertTrue(np.array_equal(static_res2, eager_dynamic_res2)) + np.testing.assert_array_equal(static_res1, dynamic_res1) + np.testing.assert_array_equal(static_res2, dynamic_res2) + np.testing.assert_array_equal(static_res1, eager_dynamic_res1) + np.testing.assert_array_equal(static_res2, eager_dynamic_res2) def test_switch_case(self): @@ -2768,12 +2747,12 @@ class TestLayer(LayerTest): dynamic_res2 = out_2.numpy() dynamic_res3 = out_3.numpy() - self.assertTrue(np.array_equal(static_res1, dynamic_res1)) - self.assertTrue(np.array_equal(static_res2, dynamic_res2)) - self.assertTrue(np.array_equal(static_res3, dynamic_res3)) - self.assertTrue(np.array_equal(static_res1, eager_dynamic_res1)) - self.assertTrue(np.array_equal(static_res2, eager_dynamic_res2)) - self.assertTrue(np.array_equal(static_res3, eager_dynamic_res3)) + np.testing.assert_array_equal(static_res1, dynamic_res1) + np.testing.assert_array_equal(static_res2, dynamic_res2) + np.testing.assert_array_equal(static_res3, dynamic_res3) + np.testing.assert_array_equal(static_res1, eager_dynamic_res1) + np.testing.assert_array_equal(static_res2, eager_dynamic_res2) + np.testing.assert_array_equal(static_res3, eager_dynamic_res3) def test_crop_tensor(self): with self.static_graph(): @@ -2848,7 +2827,7 @@ class TestLayer(LayerTest): predict = fluid.layers.softmax(fc_out) dynamic_out = fluid.layers.accuracy(input=predict, label=label, k=5) - self.assertTrue(np.array_equal(static_out[0], dynamic_out.numpy())) + np.testing.assert_array_equal(static_out[0], dynamic_out.numpy()) class TestBook(LayerTest): @@ -2909,9 +2888,11 @@ class TestBook(LayerTest): continue if method.__name__ not in self.not_compare_static_dygraph_set: - self.assertTrue( - np.array_equal(static_result[0], dy_result_value), - "Result of function [{}] not equal".format(method.__name__)) + np.testing.assert_array_equal( + static_result[0], + dy_result_value, + err_msg='Result of function [{}] not equal'.format( + method.__name__)) def test_all_layers(self): with _test_eager_guard(): @@ -4210,8 +4191,8 @@ class TestBook(LayerTest): 0.5, rois_num=rois_num_dy) dy_res_value = dy_res[0].numpy() - self.assertTrue(np.array_equal(static_res, dy_res_value)) - self.assertTrue(np.array_equal(static_res, dy_eager_res_value)) + np.testing.assert_array_equal(static_res, dy_res_value) + np.testing.assert_array_equal(static_res, dy_eager_res_value) def test_sequence_enumerate(self): # TODO(minqiyang): dygraph do not support lod now @@ -4261,8 +4242,8 @@ class TestBook(LayerTest): 2, rois_num=rois_num_dy) 
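[Where the original assertion carried a custom message, as in the TestBook hunk above, the message moves to assert_array_equal's err_msg keyword. A small sketch of the resulting failure output (the function name 'foo' is a placeholder):

    import numpy as np

    try:
        np.testing.assert_array_equal(
            np.ones(3),
            np.zeros(3),
            err_msg='Result of function [foo] not equal')
    except AssertionError as e:
        # The custom message is printed alongside NumPy's own
        # element-wise mismatch report, so no diagnostics are lost.
        print(e)
]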
dy_res_value = dy_res.numpy() - self.assertTrue(np.array_equal(static_res, dy_eager_res_value)) - self.assertTrue(np.array_equal(static_res, dy_res_value)) + np.testing.assert_array_equal(static_res, dy_eager_res_value) + np.testing.assert_array_equal(static_res, dy_res_value) def test_dice_loss(self): num_classes = 4 @@ -4295,8 +4276,8 @@ class TestBook(LayerTest): label_ = base.to_variable(label_np) dy_res = layers.dice_loss(input_, label_, eps) dy_res_value = dy_res.numpy() - self.assertTrue(np.array_equal(static_res, dy_res_value)) - self.assertTrue(np.array_equal(static_res, dy_eager_res_value)) + np.testing.assert_array_equal(static_res, dy_res_value) + np.testing.assert_array_equal(static_res, dy_eager_res_value) def test_roi_perspective_transform(self): # TODO(minqiyang): dygraph do not support lod now diff --git a/python/paddle/fluid/tests/unittests/test_load_op.py b/python/paddle/fluid/tests/unittests/test_load_op.py index a9865251355..7fd09be0778 100644 --- a/python/paddle/fluid/tests/unittests/test_load_op.py +++ b/python/paddle/fluid/tests/unittests/test_load_op.py @@ -62,7 +62,7 @@ class TestLoadOp(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(start_prog) ret = exe.run(main_prog, fetch_list=[var.name]) - self.assertTrue(np.array_equal(self.ones, ret[0])) + np.testing.assert_array_equal(self.ones, ret[0]) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_load_op_xpu.py b/python/paddle/fluid/tests/unittests/test_load_op_xpu.py index 8d7f65116b6..51799813fb6 100644 --- a/python/paddle/fluid/tests/unittests/test_load_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/test_load_op_xpu.py @@ -64,7 +64,7 @@ class TestLoadOpXpu(unittest.TestCase): exe = fluid.Executor(fluid.XPUPlace(0)) exe.run(start_prog) ret = exe.run(main_prog, fetch_list=[var.name]) - self.assertTrue(np.array_equal(self.ones, ret[0])) + np.testing.assert_array_equal(self.ones, ret[0]) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py index 0005ccb4ab6..804eb988787 100644 --- a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py +++ b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py @@ -123,7 +123,7 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase): def check_load_state_dict(self, orig_dict, load_dict): for var_name, value in six.iteritems(orig_dict): - self.assertTrue(np.array_equal(value, load_dict[var_name])) + np.testing.assert_array_equal(value, load_dict[var_name]) def test_load_default(self): self.save_dirname = os.path.join( diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py index 793d0e9bf5a..c7a118b3731 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py @@ -17,7 +17,7 @@ from __future__ import print_function import unittest import paddle import paddle.fluid.core as core -import numpy +import numpy as np class TestLoDTensorArray(unittest.TestCase): @@ -30,7 +30,7 @@ class TestLoDTensorArray(unittest.TestCase): cpu = core.CPUPlace() for i in range(10): t = core.LoDTensor() - t.set(numpy.array([i], dtype='float32'), cpu) + t.set(np.array([i], dtype='float32'), cpu) t.set_recursive_sequence_lengths([[1]]) tensor_array.append(t) @@ -38,16 +38,15 @@ class 
TestLoDTensorArray(unittest.TestCase): for i in range(10): t = tensor_array[i] - self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32')) + self.assertEqual(np.array(t), np.array([i], dtype='float32')) self.assertEqual([[1]], t.recursive_sequence_lengths()) t = core.LoDTensor() - t.set(numpy.array([i + 10], dtype='float32'), cpu) + t.set(np.array([i + 10], dtype='float32'), cpu) t.set_recursive_sequence_lengths([[1]]) tensor_array[i] = t t = tensor_array[i] - self.assertEqual(numpy.array(t), - numpy.array([i + 10], dtype='float32')) + self.assertEqual(np.array(t), np.array([i + 10], dtype='float32')) self.assertEqual([[1]], t.recursive_sequence_lengths()) @@ -60,13 +59,12 @@ class TestCreateArray(unittest.TestCase): def test_initialized_list_and_error(self): paddle.disable_static() init_data = [ - numpy.random.random(shape).astype('float32') - for shape in self.shapes + np.random.random(shape).astype('float32') for shape in self.shapes ] array = paddle.tensor.create_array( 'float32', [paddle.to_tensor(x) for x in init_data]) for res, gt in zip(array, init_data): - self.assertTrue(numpy.array_equal(res, gt)) + np.testing.assert_array_equal(res, gt) # test for None array = paddle.tensor.create_array('float32') diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py index 9dc7c1aa636..d5da01d47a8 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py @@ -231,12 +231,12 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): def test_embedding_weights(self): result = convert_uint16_to_float(self.result[0]) - self.assertTrue(np.array_equal(self.w_fp32, result)) + np.testing.assert_array_equal(self.w_fp32, result) def test_lookup_results(self): lookup_result = convert_uint16_to_float(self.result[1]) lookup_ref = _lookup(self.w_fp32, self.ids, self.flat_ids) - self.assertTrue(np.array_equal(lookup_result, lookup_ref)) + np.testing.assert_array_equal(lookup_result, lookup_ref) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py index 06b232443a8..d9bca121656 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py @@ -119,12 +119,12 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase): def test_embedding_weights(self): result = convert_uint16_to_float(self.result[0]) - self.assertTrue(np.array_equal(self.w_fp32, result)) + np.testing.assert_array_equal(self.w_fp32, result) def test_lookup_results(self): lookup_result = convert_uint16_to_float(self.result[1]) lookup_ref = _lookup(self.w_fp32, self.ids, self.flat_ids, self.op_type) - self.assertTrue(np.array_equal(lookup_result, lookup_ref)) + np.testing.assert_array_equal(lookup_result, lookup_ref) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py index 9dd47647a1a..037ca7aa066 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py @@ -37,15 +37,15 @@ class TestMathOpPatches(unittest.TestCase): # e = a + ab place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + 
a_np = np.random.random(size=[10, 1]).astype('float32') b_np, c_np, d_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b, c, d]) - self.assertTrue(numpy.allclose(a_np + 10, b_np)) - ab_np = numpy.concatenate([a_np, b_np], axis=1) - self.assertTrue(numpy.allclose(ab_np + 10, c_np)) - d_expected = ab_np + numpy.concatenate([a_np, a_np], axis=1) - self.assertTrue(numpy.allclose(d_expected, d_np)) + self.assertTrue(np.allclose(a_np + 10, b_np)) + ab_np = np.concatenate([a_np, b_np], axis=1) + self.assertTrue(np.allclose(ab_np + 10, c_np)) + d_expected = ab_np + np.concatenate([a_np, a_np], axis=1) + self.assertTrue(np.allclose(d_expected, d_np)) @prog_scope() def test_radd_scalar(self): @@ -53,11 +53,11 @@ class TestMathOpPatches(unittest.TestCase): b = 10 + a place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(a_np + 10, b_np)) + self.assertTrue(np.allclose(a_np + 10, b_np)) @prog_scope() def test_sub_scalar(self): @@ -65,11 +65,11 @@ class TestMathOpPatches(unittest.TestCase): b = a - 10 place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(a_np - 10, b_np)) + self.assertTrue(np.allclose(a_np - 10, b_np)) @prog_scope() def test_radd_scalar(self): @@ -77,11 +77,11 @@ class TestMathOpPatches(unittest.TestCase): b = 10 - a place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(10 - a_np, b_np)) + self.assertTrue(np.allclose(10 - a_np, b_np)) @prog_scope() def test_mul_scalar(self): @@ -89,11 +89,11 @@ class TestMathOpPatches(unittest.TestCase): b = a * 10 place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(a_np * 10, b_np)) + self.assertTrue(np.allclose(a_np * 10, b_np)) @prog_scope() def test_rmul_scalar(self): @@ -101,11 +101,11 @@ class TestMathOpPatches(unittest.TestCase): b = 10 * a place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(10 * a_np, b_np)) + self.assertTrue(np.allclose(10 * a_np, b_np)) @prog_scope() def test_div_scalar(self): @@ -113,11 +113,11 @@ class TestMathOpPatches(unittest.TestCase): b = a / 10 place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(a_np / 10, b_np)) + self.assertTrue(np.allclose(a_np / 10, b_np)) @prog_scope() def test_rdiv_scalar(self): @@ -125,12 +125,12 @@ class 
TestMathOpPatches(unittest.TestCase): b = 10 / a place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') + 1e-2 + a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(10 / a_np, b_np)) + self.assertTrue(np.allclose(10 / a_np, b_np)) @prog_scope() def test_div_two_tensor(self): @@ -139,15 +139,15 @@ class TestMathOpPatches(unittest.TestCase): c = a / b place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') - b_np = numpy.random.random(size=[10, 1]).astype('float32') + 1e-2 + a_np = np.random.random(size=[10, 1]).astype('float32') + b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 c_np = exe.run(fluid.default_main_program(), feed={ "a": a_np, 'b': b_np }, fetch_list=[c]) - self.assertTrue(numpy.allclose(a_np / b_np, c_np)) + self.assertTrue(np.allclose(a_np / b_np, c_np)) @prog_scope() def test_mul_two_tensor(self): @@ -156,15 +156,15 @@ class TestMathOpPatches(unittest.TestCase): c = a * b place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') - b_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') + b_np = np.random.random(size=[10, 1]).astype('float32') c_np = exe.run(fluid.default_main_program(), feed={ "a": a_np, 'b': b_np }, fetch_list=[c]) - self.assertTrue(numpy.allclose(a_np * b_np, c_np)) + self.assertTrue(np.allclose(a_np * b_np, c_np)) @prog_scope() def test_add_two_tensor(self): @@ -173,15 +173,15 @@ class TestMathOpPatches(unittest.TestCase): c = a + b place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') - b_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') + b_np = np.random.random(size=[10, 1]).astype('float32') c_np = exe.run(fluid.default_main_program(), feed={ "a": a_np, 'b': b_np }, fetch_list=[c]) - self.assertTrue(numpy.allclose(a_np + b_np, c_np)) + self.assertTrue(np.allclose(a_np + b_np, c_np)) @prog_scope() def test_sub_two_tensor(self): @@ -190,15 +190,15 @@ class TestMathOpPatches(unittest.TestCase): c = a - b place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.random(size=[10, 1]).astype('float32') - b_np = numpy.random.random(size=[10, 1]).astype('float32') + a_np = np.random.random(size=[10, 1]).astype('float32') + b_np = np.random.random(size=[10, 1]).astype('float32') c_np = exe.run(fluid.default_main_program(), feed={ "a": a_np, 'b': b_np }, fetch_list=[c]) - self.assertTrue(numpy.allclose(a_np - b_np, c_np)) + self.assertTrue(np.allclose(a_np - b_np, c_np)) @prog_scope() def test_integer_div(self): @@ -206,13 +206,13 @@ class TestMathOpPatches(unittest.TestCase): b = a / 7 place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.array([3, 4, 10, 14, 9, 18]).astype('int64') + a_np = np.array([3, 4, 10, 14, 9, 18]).astype('int64') b_np, = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) b_np_actual = (a_np / 7).astype('float32') - self.assertTrue(numpy.allclose(b_np, b_np_actual)) + self.assertTrue(np.allclose(b_np, b_np_actual)) @prog_scope() def test_equal(self): @@ -222,8 +222,8 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.array([3, 4, 10, 
14, 9, 18]).astype('float32') - b_np = numpy.array([3, 4, 11, 15, 8, 18]).astype('float32') + a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32') + b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32') c_np, = exe.run(fluid.default_main_program(), feed={ @@ -232,7 +232,7 @@ class TestMathOpPatches(unittest.TestCase): }, fetch_list=[c]) - self.assertTrue(numpy.array_equal(c_np, a_np == b_np)) + np.testing.assert_array_equal(c_np, a_np == b_np) self.assertEqual(c.dtype, fluid.core.VarDesc.VarType.BOOL) @prog_scope() @@ -247,8 +247,8 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.array([3, 4, 10, 14, 9, 18]).astype('float') - b_np = numpy.array([3, 4, 11, 15, 8, 18]).astype('float') + a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float') + b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float') c_np, = exe.run(fluid.default_main_program(), feed={ "a": a_np, @@ -256,7 +256,7 @@ class TestMathOpPatches(unittest.TestCase): }, fetch_list=[c]) - self.assertTrue(numpy.array_equal(c_np, a_np - b_np)) + np.testing.assert_array_equal(c_np, a_np - b_np) @prog_scope() def test_neg(self): @@ -264,12 +264,12 @@ class TestMathOpPatches(unittest.TestCase): b = -a place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.uniform(-1, 1, size=[10, 1]).astype('float32') + a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(-a_np, b_np)) + self.assertTrue(np.allclose(-a_np, b_np)) @prog_scope() def test_astype(self): @@ -277,12 +277,12 @@ class TestMathOpPatches(unittest.TestCase): b = a.astype('float32') place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = numpy.random.uniform(-1, 1, size=[10, 1]).astype('float64') + a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64') b_np = exe.run(fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]) - self.assertTrue(numpy.allclose(a_np.astype('float32'), b_np)) + self.assertTrue(np.allclose(a_np.astype('float32'), b_np)) def test_bitwise_and(self): x_np = np.random.randint(-100, 100, [2, 3, 5]).astype("int32") @@ -300,7 +300,7 @@ class TestMathOpPatches(unittest.TestCase): "y": y_np }, fetch_list=[z]) - self.assertTrue(np.array_equal(out[0], out_np)) + np.testing.assert_array_equal(out[0], out_np) @prog_scope() def test_bitwise_or(self): @@ -319,7 +319,7 @@ class TestMathOpPatches(unittest.TestCase): "y": y_np }, fetch_list=[z]) - self.assertTrue(np.array_equal(out[0], out_np)) + np.testing.assert_array_equal(out[0], out_np) @prog_scope() def test_bitwise_xor(self): @@ -338,7 +338,7 @@ class TestMathOpPatches(unittest.TestCase): "y": y_np }, fetch_list=[z]) - self.assertTrue(np.array_equal(out[0], out_np)) + np.testing.assert_array_equal(out[0], out_np) @prog_scope() def test_bitwise_not(self): @@ -352,7 +352,7 @@ class TestMathOpPatches(unittest.TestCase): out = exe.run(fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z]) - self.assertTrue(np.array_equal(out[0], out_np)) + np.testing.assert_array_equal(out[0], out_np) @prog_scope() def test_T(self): @@ -366,7 +366,7 @@ class TestMathOpPatches(unittest.TestCase): out = exe.run(fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z]) - self.assertTrue(np.array_equal(out[0], out_np)) + np.testing.assert_array_equal(out[0], out_np) @prog_scope() def test_ndim(self): @@ -380,8 +380,8 @@ class TestMathOpPatches(unittest.TestCase): a = paddle.static.data(name='a', 
shape=[2, 3], dtype='float32') b = paddle.static.data(name='b', shape=[3, 5], dtype='float32') c = a @ b # __matmul__ - a_np = numpy.random.uniform(-1, 1, size=[2, 3]).astype('float32') - b_np = numpy.random.uniform(-1, 1, size=[3, 5]).astype('float32') + a_np = np.random.uniform(-1, 1, size=[2, 3]).astype('float32') + b_np = np.random.uniform(-1, 1, size=[3, 5]).astype('float32') place = paddle.CPUPlace() exe = paddle.static.Executor(place) c_np = exe.run(paddle.static.default_main_program(), @@ -390,7 +390,7 @@ class TestMathOpPatches(unittest.TestCase): "b": b_np }, fetch_list=[c]) - self.assertTrue(numpy.allclose(a_np @ b_np, c_np)) + self.assertTrue(np.allclose(a_np @ b_np, c_np)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py index 92fa9049dab..fd768b1516f 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py @@ -35,7 +35,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a + b - self.assertTrue(np.array_equal(res.numpy(), a_np + b_np)) + np.testing.assert_array_equal(res.numpy(), a_np + b_np) def test_add(self): with _test_eager_guard(): @@ -49,7 +49,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a - b - self.assertTrue(np.array_equal(res.numpy(), a_np - b_np)) + np.testing.assert_array_equal(res.numpy(), a_np - b_np) def test_sub(self): with _test_eager_guard(): @@ -63,7 +63,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a * b - self.assertTrue(np.array_equal(res.numpy(), a_np * b_np)) + np.testing.assert_array_equal(res.numpy(), a_np * b_np) def test_mul(self): with _test_eager_guard(): @@ -91,7 +91,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = a + b - self.assertTrue(np.array_equal(res.numpy(), a_np + b)) + np.testing.assert_array_equal(res.numpy(), a_np + b) def test_add_scalar(self): with _test_eager_guard(): @@ -104,7 +104,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = b + a - self.assertTrue(np.array_equal(res.numpy(), b + a_np)) + np.testing.assert_array_equal(res.numpy(), b + a_np) def test_add_scalar_reverse(self): with _test_eager_guard(): @@ -117,7 +117,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = a - b - self.assertTrue(np.array_equal(res.numpy(), a_np - b)) + np.testing.assert_array_equal(res.numpy(), a_np - b) def test_sub_scalar(self): with _test_eager_guard(): @@ -130,7 +130,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = b - a - self.assertTrue(np.array_equal(res.numpy(), b - a_np)) + np.testing.assert_array_equal(res.numpy(), b - a_np) def test_sub_scalar_reverse(self): with _test_eager_guard(): @@ -143,7 +143,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = a * b - self.assertTrue(np.array_equal(res.numpy(), a_np * b)) + np.testing.assert_array_equal(res.numpy(), a_np * b) def test_mul_scalar(self): with _test_eager_guard(): @@ -186,7 +186,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = 
fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a // b - self.assertTrue(np.array_equal(res.numpy(), a_np // b_np)) + np.testing.assert_array_equal(res.numpy(), a_np // b_np) def test_floor_div(self): with _test_eager_guard(): @@ -200,7 +200,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a % b - self.assertTrue(np.array_equal(res.numpy(), a_np % b_np)) + np.testing.assert_array_equal(res.numpy(), a_np % b_np) def test_mod(self): with _test_eager_guard(): @@ -218,19 +218,19 @@ class TestMathOpPatchesVarBase(unittest.TestCase): out_np = x_np & y_np out = x & y - self.assertTrue(np.array_equal(out.numpy(), out_np)) + np.testing.assert_array_equal(out.numpy(), out_np) out_np = x_np | y_np out = x | y - self.assertTrue(np.array_equal(out.numpy(), out_np)) + np.testing.assert_array_equal(out.numpy(), out_np) out_np = x_np ^ y_np out = x ^ y - self.assertTrue(np.array_equal(out.numpy(), out_np)) + np.testing.assert_array_equal(out.numpy(), out_np) out_np = ~x_np out = ~x - self.assertTrue(np.array_equal(out.numpy(), out_np)) + np.testing.assert_array_equal(out.numpy(), out_np) def test_bitwise(self): with _test_eager_guard(): @@ -248,8 +248,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase): c = fluid.dygraph.to_variable(c_np) res1 = (a == b) res2 = (a == c) - self.assertTrue(np.array_equal(res1.numpy(), a_np == b_np)) - self.assertTrue(np.array_equal(res2.numpy(), a_np == c_np)) + np.testing.assert_array_equal(res1.numpy(), a_np == b_np) + np.testing.assert_array_equal(res2.numpy(), a_np == c_np) def test_equal(self): with _test_eager_guard(): @@ -266,8 +266,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase): c = fluid.dygraph.to_variable(c_np) res1 = (a != b) res2 = (a != c) - self.assertTrue(np.array_equal(res1.numpy(), a_np != b_np)) - self.assertTrue(np.array_equal(res2.numpy(), a_np != c_np)) + np.testing.assert_array_equal(res1.numpy(), a_np != b_np) + np.testing.assert_array_equal(res2.numpy(), a_np != c_np) def test_not_equal(self): with _test_eager_guard(): @@ -281,7 +281,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = (a < b) - self.assertTrue(np.array_equal(res.numpy(), a_np < b_np)) + np.testing.assert_array_equal(res.numpy(), a_np < b_np) def test_less_than(self): with _test_eager_guard(): @@ -295,7 +295,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = (a <= b) - self.assertTrue(np.array_equal(res.numpy(), a_np <= b_np)) + np.testing.assert_array_equal(res.numpy(), a_np <= b_np) def test_less_equal(self): with _test_eager_guard(): @@ -309,7 +309,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = (a > b) - self.assertTrue(np.array_equal(res.numpy(), a_np > b_np)) + np.testing.assert_array_equal(res.numpy(), a_np > b_np) def test_greater_than(self): with _test_eager_guard(): @@ -323,7 +323,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = (a >= b) - self.assertTrue(np.array_equal(res.numpy(), a_np >= b_np)) + np.testing.assert_array_equal(res.numpy(), a_np >= b_np) def test_greater_equal(self): with _test_eager_guard(): @@ -335,7 +335,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = 
fluid.dygraph.to_variable(a_np) res = -a - self.assertTrue(np.array_equal(res.numpy(), -a_np)) + np.testing.assert_array_equal(res.numpy(), -a_np) def test_neg(self): with _test_eager_guard(): @@ -406,7 +406,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a + b - self.assertTrue(np.array_equal(res.numpy(), a_np + b_np)) + np.testing.assert_array_equal(res.numpy(), a_np + b_np) def test_add_different_dtype(self): with _test_eager_guard(): @@ -420,7 +420,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = paddle.to_tensor(a_np) b = paddle.to_tensor(b_np) res = a // b - self.assertTrue(np.array_equal(res.numpy(), a_np // b_np)) + np.testing.assert_array_equal(res.numpy(), a_np // b_np) def test_floordiv_different_dtype(self): with _test_eager_guard(): @@ -438,8 +438,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase): self.assertEqual(res1.dtype, res2.dtype) self.assertEqual(res1.dtype, res3.dtype) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) - self.assertTrue(np.array_equal(res1.numpy(), res3.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) + np.testing.assert_array_equal(res1.numpy(), res3.numpy()) def test_astype(self): with _test_eager_guard(): @@ -454,7 +454,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): b = fluid.dygraph.to_variable(b_np) self.assertEqual((a != b).dtype, fluid.core.VarDesc.VarType.BOOL) - self.assertTrue(np.array_equal((a != b).numpy(), a_np != b_np)) + np.testing.assert_array_equal((a != b).numpy(), a_np != b_np) def test_conpare_op_broadcast(self): with _test_eager_guard(): @@ -480,248 +480,195 @@ class TestMathOpPatchesVarBase(unittest.TestCase): self.assertEqual(x.ndim, 2) self.assertEqual(x.size, 6) self.assertEqual(x.numel(), 6) - self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy())) - self.assertTrue(np.array_equal(x.tanh().numpy(), - paddle.tanh(x).numpy())) - self.assertTrue(np.array_equal(x.atan().numpy(), - paddle.atan(x).numpy())) - self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy())) + np.testing.assert_array_equal(x.exp().numpy(), paddle.exp(x).numpy()) + np.testing.assert_array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()) + np.testing.assert_array_equal(x.atan().numpy(), paddle.atan(x).numpy()) + np.testing.assert_array_equal(x.abs().numpy(), paddle.abs(x).numpy()) m = x.abs() - self.assertTrue(np.array_equal(m.sqrt().numpy(), - paddle.sqrt(m).numpy())) - self.assertTrue( - np.array_equal(m.rsqrt().numpy(), - paddle.rsqrt(m).numpy())) - self.assertTrue(np.array_equal(x.ceil().numpy(), - paddle.ceil(x).numpy())) - self.assertTrue( - np.array_equal(x.floor().numpy(), - paddle.floor(x).numpy())) - self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy())) - self.assertTrue(np.array_equal(x.acos().numpy(), - paddle.acos(x).numpy())) - self.assertTrue(np.array_equal(x.asin().numpy(), - paddle.asin(x).numpy())) - self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy())) - self.assertTrue(np.array_equal(x.sinh().numpy(), - paddle.sinh(x).numpy())) - self.assertTrue(np.array_equal(x.cosh().numpy(), - paddle.cosh(x).numpy())) - self.assertTrue( - np.array_equal(x.round().numpy(), - paddle.round(x).numpy())) - self.assertTrue( - np.array_equal(x.reciprocal().numpy(), - paddle.reciprocal(x).numpy())) - self.assertTrue( - np.array_equal(x.square().numpy(), - paddle.square(x).numpy())) - self.assertTrue(np.array_equal(x.rank().numpy(), - 
paddle.rank(x).numpy())) - self.assertTrue(np.array_equal(x[0].t().numpy(), - paddle.t(x[0]).numpy())) - self.assertTrue( - np.array_equal(x.asinh().numpy(), - paddle.asinh(x).numpy())) + np.testing.assert_array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()) + np.testing.assert_array_equal(m.rsqrt().numpy(), + paddle.rsqrt(m).numpy()) + np.testing.assert_array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()) + np.testing.assert_array_equal(x.floor().numpy(), + paddle.floor(x).numpy()) + np.testing.assert_array_equal(x.cos().numpy(), paddle.cos(x).numpy()) + np.testing.assert_array_equal(x.acos().numpy(), paddle.acos(x).numpy()) + np.testing.assert_array_equal(x.asin().numpy(), paddle.asin(x).numpy()) + np.testing.assert_array_equal(x.sin().numpy(), paddle.sin(x).numpy()) + np.testing.assert_array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()) + np.testing.assert_array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()) + np.testing.assert_array_equal(x.round().numpy(), + paddle.round(x).numpy()) + np.testing.assert_array_equal(x.reciprocal().numpy(), + paddle.reciprocal(x).numpy()) + np.testing.assert_array_equal(x.square().numpy(), + paddle.square(x).numpy()) + np.testing.assert_array_equal(x.rank().numpy(), paddle.rank(x).numpy()) + np.testing.assert_array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()) + np.testing.assert_array_equal(x.asinh().numpy(), + paddle.asinh(x).numpy()) ### acosh(x) = nan, need to change input t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype) t = paddle.to_tensor(t_np) - self.assertTrue( - np.array_equal(t.acosh().numpy(), - paddle.acosh(t).numpy())) - self.assertTrue( - np.array_equal(x.atanh().numpy(), - paddle.atanh(x).numpy())) + np.testing.assert_array_equal(t.acosh().numpy(), + paddle.acosh(t).numpy()) + np.testing.assert_array_equal(x.atanh().numpy(), + paddle.atanh(x).numpy()) d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898], [1.30058, 1.0688717, 1.4928783], [1.0958099, 1.3724753, 1.8926544]]) d = d.matmul(d.t()) # ROCM not support cholesky if not fluid.core.is_compiled_with_rocm(): - self.assertTrue( - np.array_equal(d.cholesky().numpy(), - paddle.cholesky(d).numpy())) - - self.assertTrue( - np.array_equal(x.is_empty().numpy(), - paddle.is_empty(x).numpy())) - self.assertTrue( - np.array_equal(x.isfinite().numpy(), - paddle.isfinite(x).numpy())) - self.assertTrue( - np.array_equal( - x.cast('int32').numpy(), - paddle.cast(x, 'int32').numpy())) - self.assertTrue( - np.array_equal( - x.expand([3, 2, 3]).numpy(), - paddle.expand(x, [3, 2, 3]).numpy())) - self.assertTrue( - np.array_equal( - x.tile([2, 2]).numpy(), - paddle.tile(x, [2, 2]).numpy())) - self.assertTrue( - np.array_equal(x.flatten().numpy(), - paddle.flatten(x).numpy())) + np.testing.assert_array_equal(d.cholesky().numpy(), + paddle.cholesky(d).numpy()) + + np.testing.assert_array_equal(x.is_empty().numpy(), + paddle.is_empty(x).numpy()) + np.testing.assert_array_equal(x.isfinite().numpy(), + paddle.isfinite(x).numpy()) + np.testing.assert_array_equal( + x.cast('int32').numpy(), + paddle.cast(x, 'int32').numpy()) + np.testing.assert_array_equal( + x.expand([3, 2, 3]).numpy(), + paddle.expand(x, [3, 2, 3]).numpy()) + np.testing.assert_array_equal( + x.tile([2, 2]).numpy(), + paddle.tile(x, [2, 2]).numpy()) + np.testing.assert_array_equal(x.flatten().numpy(), + paddle.flatten(x).numpy()) index = paddle.to_tensor([0, 1]) - self.assertTrue( - np.array_equal( - x.gather(index).numpy(), - paddle.gather(x, index).numpy())) + np.testing.assert_array_equal( + x.gather(index).numpy(), + 
paddle.gather(x, index).numpy()) index = paddle.to_tensor([[0, 1], [1, 2]]) - self.assertTrue( - np.array_equal( - x.gather_nd(index).numpy(), - paddle.gather_nd(x, index).numpy())) - self.assertTrue( - np.array_equal( - x.reverse([0, 1]).numpy(), - paddle.reverse(x, [0, 1]).numpy())) - self.assertTrue( - np.array_equal( - a.reshape([3, 2]).numpy(), - paddle.reshape(a, [3, 2]).numpy())) - self.assertTrue( - np.array_equal( - x.slice([0, 1], [0, 0], [1, 2]).numpy(), - paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy())) - self.assertTrue( - np.array_equal( - x.split(2)[0].numpy(), - paddle.split(x, 2)[0].numpy())) + np.testing.assert_array_equal( + x.gather_nd(index).numpy(), + paddle.gather_nd(x, index).numpy()) + np.testing.assert_array_equal( + x.reverse([0, 1]).numpy(), + paddle.reverse(x, [0, 1]).numpy()) + np.testing.assert_array_equal( + a.reshape([3, 2]).numpy(), + paddle.reshape(a, [3, 2]).numpy()) + np.testing.assert_array_equal( + x.slice([0, 1], [0, 0], [1, 2]).numpy(), + paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()) + np.testing.assert_array_equal( + x.split(2)[0].numpy(), + paddle.split(x, 2)[0].numpy()) m = paddle.to_tensor( np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype)) - self.assertTrue( - np.array_equal( - m.squeeze([]).numpy(), - paddle.squeeze(m, []).numpy())) - self.assertTrue( - np.array_equal( - m.squeeze([1, 2]).numpy(), - paddle.squeeze(m, [1, 2]).numpy())) + np.testing.assert_array_equal( + m.squeeze([]).numpy(), + paddle.squeeze(m, []).numpy()) + np.testing.assert_array_equal( + m.squeeze([1, 2]).numpy(), + paddle.squeeze(m, [1, 2]).numpy()) m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32') - self.assertTrue( - np.array_equal(m.unique()[0].numpy(), - paddle.unique(m)[0].numpy())) - self.assertTrue( - np.array_equal( - m.unique(return_counts=True)[1], - paddle.unique(m, return_counts=True)[1])) - self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0]))) - self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0))) - self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1))) - self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1))) + np.testing.assert_array_equal(m.unique()[0].numpy(), + paddle.unique(m)[0].numpy()) + np.testing.assert_array_equal( + m.unique(return_counts=True)[1], + paddle.unique(m, return_counts=True)[1]) + np.testing.assert_array_equal(x.flip([0]), paddle.flip(x, [0])) + np.testing.assert_array_equal(x.unbind(0), paddle.unbind(x, 0)) + np.testing.assert_array_equal(x.roll(1), paddle.roll(x, 1)) + np.testing.assert_array_equal(x.cumsum(1), paddle.cumsum(x, 1)) m = paddle.to_tensor(1) - self.assertTrue(np.array_equal(m.increment(), paddle.increment(m))) + np.testing.assert_array_equal(m.increment(), paddle.increment(m)) m = x.abs() - self.assertTrue(np.array_equal(m.log(), paddle.log(m))) - self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2))) - self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x))) + np.testing.assert_array_equal(m.log(), paddle.log(m)) + np.testing.assert_array_equal(x.pow(2), paddle.pow(x, 2)) + np.testing.assert_array_equal(x.reciprocal(), paddle.reciprocal(x)) # 2. 
Binary operation - self.assertTrue( - np.array_equal(x.divide(y).numpy(), - paddle.divide(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.matmul(y, True, False).numpy(), - paddle.matmul(x, y, True, False).numpy())) - self.assertTrue( - np.array_equal( - x.norm(p='fro', axis=[0, 1]).numpy(), - paddle.norm(x, p='fro', axis=[0, 1]).numpy())) - self.assertTrue( - np.array_equal(x.dist(y).numpy(), - paddle.dist(x, y).numpy())) - self.assertTrue( - np.array_equal(x.cross(y).numpy(), - paddle.cross(x, y).numpy())) + np.testing.assert_array_equal( + x.divide(y).numpy(), + paddle.divide(x, y).numpy()) + np.testing.assert_array_equal( + x.matmul(y, True, False).numpy(), + paddle.matmul(x, y, True, False).numpy()) + np.testing.assert_array_equal( + x.norm(p='fro', axis=[0, 1]).numpy(), + paddle.norm(x, p='fro', axis=[0, 1]).numpy()) + np.testing.assert_array_equal( + x.dist(y).numpy(), + paddle.dist(x, y).numpy()) + np.testing.assert_array_equal( + x.cross(y).numpy(), + paddle.cross(x, y).numpy()) m = x.expand([2, 2, 3]) n = y.expand([2, 2, 3]).transpose([0, 2, 1]) - self.assertTrue( - np.array_equal(m.bmm(n).numpy(), - paddle.bmm(m, n).numpy())) - self.assertTrue( - np.array_equal( - x.histogram(5, -1, 1).numpy(), - paddle.histogram(x, 5, -1, 1).numpy())) - self.assertTrue( - np.array_equal(x.equal(y).numpy(), - paddle.equal(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.greater_equal(y).numpy(), - paddle.greater_equal(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.greater_than(y).numpy(), - paddle.greater_than(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.less_equal(y).numpy(), - paddle.less_equal(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.less_than(y).numpy(), - paddle.less_than(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.not_equal(y).numpy(), - paddle.not_equal(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.equal_all(y).numpy(), - paddle.equal_all(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.allclose(y).numpy(), - paddle.allclose(x, y).numpy())) + np.testing.assert_array_equal( + m.bmm(n).numpy(), + paddle.bmm(m, n).numpy()) + np.testing.assert_array_equal( + x.histogram(5, -1, 1).numpy(), + paddle.histogram(x, 5, -1, 1).numpy()) + np.testing.assert_array_equal( + x.equal(y).numpy(), + paddle.equal(x, y).numpy()) + np.testing.assert_array_equal( + x.greater_equal(y).numpy(), + paddle.greater_equal(x, y).numpy()) + np.testing.assert_array_equal( + x.greater_than(y).numpy(), + paddle.greater_than(x, y).numpy()) + np.testing.assert_array_equal( + x.less_equal(y).numpy(), + paddle.less_equal(x, y).numpy()) + np.testing.assert_array_equal( + x.less_than(y).numpy(), + paddle.less_than(x, y).numpy()) + np.testing.assert_array_equal( + x.not_equal(y).numpy(), + paddle.not_equal(x, y).numpy()) + np.testing.assert_array_equal( + x.equal_all(y).numpy(), + paddle.equal_all(x, y).numpy()) + np.testing.assert_array_equal( + x.allclose(y).numpy(), + paddle.allclose(x, y).numpy()) m = x.expand([2, 2, 3]) - self.assertTrue( - np.array_equal( - x.expand_as(m).numpy(), - paddle.expand_as(x, m).numpy())) + np.testing.assert_array_equal( + x.expand_as(m).numpy(), + paddle.expand_as(x, m).numpy()) index = paddle.to_tensor([2, 1, 0]) - self.assertTrue( - np.array_equal( - a.scatter(index, b).numpy(), - paddle.scatter(a, index, b).numpy())) + np.testing.assert_array_equal( + a.scatter(index, b).numpy(), + paddle.scatter(a, index, b).numpy()) # 3. 
Bool tensor operation x = paddle.to_tensor([[True, False], [True, False]]) y = paddle.to_tensor([[False, False], [False, True]]) - self.assertTrue( - np.array_equal( - x.logical_and(y).numpy(), - paddle.logical_and(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.logical_not(y).numpy(), - paddle.logical_not(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.logical_or(y).numpy(), - paddle.logical_or(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.logical_xor(y).numpy(), - paddle.logical_xor(x, y).numpy())) - self.assertTrue( - np.array_equal( - x.logical_and(y).numpy(), - paddle.logical_and(x, y).numpy())) + np.testing.assert_array_equal( + x.logical_and(y).numpy(), + paddle.logical_and(x, y).numpy()) + np.testing.assert_array_equal( + x.logical_not(y).numpy(), + paddle.logical_not(x, y).numpy()) + np.testing.assert_array_equal( + x.logical_or(y).numpy(), + paddle.logical_or(x, y).numpy()) + np.testing.assert_array_equal( + x.logical_xor(y).numpy(), + paddle.logical_xor(x, y).numpy()) + np.testing.assert_array_equal( + x.logical_and(y).numpy(), + paddle.logical_and(x, y).numpy()) a = paddle.to_tensor([[1, 2], [3, 4]]) b = paddle.to_tensor([[4, 3], [2, 1]]) - self.assertTrue( - np.array_equal( - x.where(a, b).numpy(), - paddle.where(x, a, b).numpy())) + np.testing.assert_array_equal( + x.where(a, b).numpy(), + paddle.where(x, a, b).numpy()) x_np = np.random.randn(3, 6, 9, 7) x = paddle.to_tensor(x_np) x_T = x.T self.assertTrue(x_T.shape, [7, 9, 6, 3]) - self.assertTrue(np.array_equal(x_T.numpy(), x_np.T)) + np.testing.assert_array_equal(x_T.numpy(), x_np.T) self.assertTrue(inspect.ismethod(a.dot)) self.assertTrue(inspect.ismethod(a.logsumexp)) @@ -781,7 +728,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): with fluid.dygraph.guard(): a = fluid.dygraph.to_variable(a_np) res = 1J * a - self.assertTrue(np.array_equal(res.numpy(), 1J * a_np)) + np.testing.assert_array_equal(res.numpy(), 1j * a_np) def test_complex_scalar(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py index e6481e12f1e..d069ebff1dc 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py @@ -523,7 +523,7 @@ class TestMatMulV2API(unittest.TestCase): result_np = np.matmul(input_x, input_y) self.assertTrue(paddle.isfinite(result)[0, 0, 0]) self.assertTrue(np.isfinite(result_np)[0, 0, 0]) - self.assertTrue(np.array_equal(result_np, result.numpy())) + np.testing.assert_array_equal(result_np, result.numpy()) paddle.set_flags( {'FLAGS_gemm_use_half_precision_compute_type': True}) diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 6b7a47febb8..a3608b5aa5e 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -100,7 +100,7 @@ class TestFP16MeanOp(TestMeanOp): dx = paddle.grad(y, x)[0].numpy() dx_expected = self.dtype(1.0 / np.prod(x_np.shape)) * np.ones( x_np.shape).astype(self.dtype) - self.assertTrue(np.array_equal(dx, dx_expected)) + np.testing.assert_array_equal(dx, dx_expected) @OpTestTool.skip_if_not_cpu_bf16() @@ -193,7 +193,7 @@ class TestReduceMeanOp(OpTest): dx_expected = ref_reduce_mean_grad(self.inputs['X'], self.attrs['dim'], self.dtype) - self.assertTrue(np.array_equal(dx, dx_expected)) + np.testing.assert_array_equal(dx, dx_expected) class 
TestReduceMeanOpDefaultAttrs(TestReduceMeanOp): diff --git a/python/paddle/fluid/tests/unittests/test_memcpy_op.py b/python/paddle/fluid/tests/unittests/test_memcpy_op.py index a1469ca558b..7a925b10036 100755 --- a/python/paddle/fluid/tests/unittests/test_memcpy_op.py +++ b/python/paddle/fluid/tests/unittests/test_memcpy_op.py @@ -139,7 +139,7 @@ class TestMemcpy_FillConstant(unittest.TestCase): feed={}, fetch_list=[gpu_var.name, pinned_var.name]) expect_value = np.array([1]).astype('bool') - self.assertTrue(np.array_equal(gpu_, expect_value)) + np.testing.assert_array_equal(gpu_, expect_value) else: pass @@ -201,7 +201,7 @@ class TestMemcpyApi(unittest.TestCase): a = paddle.ones([1024, 1024]) b = paddle.tensor.creation._memcpy(a, paddle.CUDAPinnedPlace()) self.assertEqual(b.place.__repr__(), "Place(gpu_pinned)") - self.assertTrue(np.array_equal(a.numpy(), b.numpy())) + np.testing.assert_array_equal(a.numpy(), b.numpy()) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py index 98550ac5018..7b335bf83d6 100644 --- a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py +++ b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py @@ -50,7 +50,7 @@ class TestMemoryReuseExcludeFeedVar(unittest.TestCase): for _ in range(self.iteration): exe.run(compiled_prog, feed=feed_dict, fetch_list=[loss.name]) - self.assertTrue(np.array_equal(np.array(image_tensor), np_image)) + np.testing.assert_array_equal(np.array(image_tensor), np_image) def test_main(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py index 02cadf02300..1038d0db4f6 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py @@ -135,7 +135,7 @@ class TestMergedAdam(unittest.TestCase): value2 = outs2[key] for i in range(len(value1)): if place == 'gpu': - self.assertTrue(np.array_equal(value1[i], value2[i])) + np.testing.assert_array_equal(value1[i], value2[i]) else: self.assertTrue(np.allclose(value1[i], value2[i], atol=1e-7)) diff --git a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py index 4afdc267de5..957b9e45e0c 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py @@ -314,7 +314,7 @@ class TestMergedMomentum(unittest.TestCase): self.assertEqual(len(outs1), len(outs2)) for i, (out1, out2) in enumerate(zip(outs1, outs2)): if isinstance(place, paddle.CUDAPlace): - self.assertTrue(np.array_equal(out1, out2)) + np.testing.assert_array_equal(out1, out2) else: self.assertTrue(np.allclose(out1, out2, atol=1e-7)) @@ -378,7 +378,7 @@ class TestMergedMomentum2(unittest.TestCase): self.assertEqual(len(outs1), len(outs2)) for i, (out1, out2) in enumerate(zip(outs1, outs2)): if isinstance(place, paddle.CUDAPlace): - self.assertTrue(np.array_equal(out1, out2)) + np.testing.assert_array_equal(out1, out2) else: self.assertTrue(np.allclose(out1, out2, atol=1e-7)) @@ -387,7 +387,7 @@ class TestMergedMomentum2(unittest.TestCase): self.assertEqual(len(outs3), len(outs4)) for j, (out3, out4) in enumerate(zip(outs3, outs4)): if isinstance(place, paddle.CUDAPlace): - self.assertTrue(np.array_equal(out3, out4)) + 
np.testing.assert_array_equal(out3, out4) else: self.assertTrue(np.allclose(out3, out4, atol=1e-7)) diff --git a/python/paddle/fluid/tests/unittests/test_mixed_precision.py b/python/paddle/fluid/tests/unittests/test_mixed_precision.py index 68dfb88ccd0..5fdc137f744 100644 --- a/python/paddle/fluid/tests/unittests/test_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/test_mixed_precision.py @@ -117,9 +117,9 @@ class AMPTest(unittest.TestCase): found_inf) if i % 2: self.assertTrue(found_inf) - self.assertTrue(np.array_equal(weight_, pre_weight_)) - self.assertTrue(np.array_equal(moment1_, pre_moment1_)) - self.assertTrue(np.array_equal(beta_pow1_, pre_beta_pow1_)) + np.testing.assert_array_equal(weight_, pre_weight_) + np.testing.assert_array_equal(moment1_, pre_moment1_) + np.testing.assert_array_equal(beta_pow1_, pre_beta_pow1_) else: self.assertFalse(found_inf) self.assertFalse(np.array_equal(weight_, pre_weight_)) diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py index b60a46d66ad..2233189285a 100644 --- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py +++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py @@ -251,33 +251,33 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(y), 5187793) self.assertEqual(np.mean(y), 5066.2041015625) expect = [9982, 1655, 4741, 1323, 9319, 3298, 6473, 7477, 2507, 2628] - self.assertTrue(np.array_equal(y[100:110, :].flatten(), expect)) + np.testing.assert_array_equal(y[100:110, :].flatten(), expect) y = paddle.multinomial(x, 5000, replacement=False).numpy() self.assertEqual(np.sum(y), 25603962316) self.assertEqual(np.mean(y), 5000.77388984375) expect = [7300, 6055, 8714, 5401, 7360, 161, 5035, 7002, 6788, 2916] - self.assertTrue(np.array_equal(y[100, 1000:1010], expect)) + np.testing.assert_array_equal(y[100, 1000:1010], expect) y = paddle.multinomial(x, 5000, replacement=False).numpy() self.assertEqual(np.sum(y), 25592855710) self.assertEqual(np.mean(y), 4998.604630859375) expect = [5700, 6567, 4399, 5688, 7472, 545, 6894, 526, 2124, 385] - self.assertTrue(np.array_equal(y[300, 3000:3010], expect)) + np.testing.assert_array_equal(y[300, 3000:3010], expect) y = paddle.multinomial(x, 20000, replacement=True).numpy() self.assertEqual(np.sum(y), 102371362581) self.assertEqual(np.mean(y), 4998.60168852539) self.assertEqual(np.std(y), 2886.316308500771) expect = [7630, 8235, 8445, 3275, 5580, 4591, 1331, 342, 1662, 7156] - self.assertTrue(np.array_equal(y[100, 0:10], expect)) + np.testing.assert_array_equal(y[100, 0:10], expect) y = paddle.multinomial(x, 20000, replacement=True).numpy() self.assertEqual(np.sum(y), 102400672117) self.assertEqual(np.mean(y), 5000.032818212891) self.assertEqual(np.std(y), 2886.913426124017) expect = [4159, 7849, 9305, 5759, 4422, 122, 345, 2897, 5200, 5911] - self.assertTrue(np.array_equal(y[100, 0:10], expect)) + np.testing.assert_array_equal(y[100, 0:10], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_op_function_generator.py b/python/paddle/fluid/tests/unittests/test_op_function_generator.py index e97895cf8bb..82fe1e1781d 100644 --- a/python/paddle/fluid/tests/unittests/test_op_function_generator.py +++ b/python/paddle/fluid/tests/unittests/test_op_function_generator.py @@ -51,7 +51,7 @@ class TestVariable(unittest.TestCase): res1 = layers.elementwise_add(x, y) res2 = _C_ops.elementwise_add(x, y) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) 
+ np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_elementwise_mul(self): with fluid.dygraph.guard(): @@ -63,7 +63,7 @@ class TestVariable(unittest.TestCase): res1 = layers.elementwise_mul(x, y) res2 = _C_ops.elementwise_mul(x, y) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_relu(self): with fluid.dygraph.guard(): @@ -73,7 +73,7 @@ class TestVariable(unittest.TestCase): res1 = layers.relu(x) res2 = _C_ops.relu(x) - self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_trace_backward(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -91,8 +91,8 @@ class TestVariable(unittest.TestCase): x_grad = x.gradient() y_grad = y.gradient() - self.assertTrue(np.array_equal(x_grad, loss.gradient() * b)) - self.assertTrue(np.array_equal(y_grad, loss.gradient() * a)) + np.testing.assert_array_equal(x_grad, loss.gradient() * b) + np.testing.assert_array_equal(y_grad, loss.gradient() * a) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": False}) def test_traced_layer(self): @@ -106,8 +106,7 @@ class TestVariable(unittest.TestCase): layer, inputs=x) # dygraph out res_static_graph = static_layer([x])[0] - self.assertTrue( - np.array_equal(res_dygraph.numpy(), res_static_graph)) + np.testing.assert_array_equal(res_dygraph.numpy(), res_static_graph) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_ops_nms.py b/python/paddle/fluid/tests/unittests/test_ops_nms.py index 3d6f2b717f2..4004f29b446 100644 --- a/python/paddle/fluid/tests/unittests/test_ops_nms.py +++ b/python/paddle/fluid/tests/unittests/test_ops_nms.py @@ -100,9 +100,10 @@ class TestOpsNMS(unittest.TestCase): self.threshold) out_py = nms(boxes, self.threshold) - self.assertTrue( - np.array_equal(out.numpy(), out_py), - "paddle out: {}\n py out: {}\n".format(out, out_py)) + np.testing.assert_array_equal( + out.numpy(), + out_py, + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) def test_multiclass_nms_dynamic(self): for device in self.devices: @@ -118,9 +119,10 @@ class TestOpsNMS(unittest.TestCase): out_py = multiclass_nms(boxes, scores, category_idxs, self.threshold, self.topk) - self.assertTrue( - np.array_equal(out.numpy(), out_py), - "paddle out: {}\n py out: {}\n".format(out, out_py)) + np.testing.assert_array_equal( + out.numpy(), + out_py, + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) def test_multiclass_nms_static(self): for device in self.devices: @@ -157,9 +159,10 @@ class TestOpsNMS(unittest.TestCase): self.threshold, self.topk) out = np.array(out) out = np.squeeze(out) - self.assertTrue( - np.array_equal(out, out_py), - "paddle out: {}\n py out: {}\n".format(out, out_py)) + np.testing.assert_array_equal( + out, + out_py, + err_msg='paddle out: {}\n py out: {}\n'.format(out, out_py)) def test_multiclass_nms_dynamic_to_static(self): for device in self.devices: @@ -192,9 +195,10 @@ class TestOpsNMS(unittest.TestCase): ) load_func = paddle.jit.load(self.path) res = load_func(paddle.to_tensor(boxes)) - self.assertTrue( - np.array_equal(origin, res), - "origin out: {}\n inference model out: {}\n".format( + np.testing.assert_array_equal( + origin, + res, + err_msg='origin out: {}\n inference model out: {}\n'.format( origin, res)) def test_matrix_nms_dynamic(self): diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py 
index 490167a8ff7..df470bc9ec1 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -1362,8 +1362,7 @@ class TestMasterWeightSaveForFP16(unittest.TestCase): use_save_load=True) out_no_state_dict = self.check_with_opt_state_dict( use_save_load=False) - self.assertTrue( - np.array_equal(out_use_state_dict, out_no_state_dict)) + np.testing.assert_array_equal(out_use_state_dict, out_no_state_dict) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py index 43d42769053..0faeaec53d2 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py @@ -194,15 +194,13 @@ class TestDygraphDoubleGrad(TestCase): if grad_y is not None: self.assertTrue(grad_y.stop_gradient) - self.assertTrue( - np.array_equal(grad_y.numpy(), - original_random_grad_y)) + np.testing.assert_array_equal(grad_y.numpy(), + original_random_grad_y) if grad_z is not None: self.assertTrue(grad_z.stop_gradient) - self.assertTrue( - np.array_equal(grad_z.numpy(), - original_random_grad_z)) + np.testing.assert_array_equal(grad_z.numpy(), + original_random_grad_z) def test_none_one_initial_gradient(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py index d3052b719ae..44ec5a0aa6d 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py @@ -117,7 +117,7 @@ class TestSaveLoadLargeParameters(unittest.TestCase): dict_load = paddle.load(path, return_numpy=True) # compare results before and after saving for key, value in save_dict.items(): - self.assertTrue(np.array_equal(dict_load[key], value.numpy())) + np.testing.assert_array_equal(dict_load[key], value.numpy()) class TestSaveLoadPickle(unittest.TestCase): @@ -158,8 +158,8 @@ class TestSaveLoadPickle(unittest.TestCase): dict_load = paddle.load(path) # compare results before and after saving for key, value in save_dict.items(): - self.assertTrue( - np.array_equal(dict_load[key].numpy(), value.numpy())) + np.testing.assert_array_equal(dict_load[key].numpy(), + value.numpy()) class TestSaveLoadAny(unittest.TestCase): @@ -242,7 +242,7 @@ class TestSaveLoadAny(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, np.array(base_t))) + np.testing.assert_array_equal(new_t, np.array(base_t)) # legacy paddle.fluid.save, paddle.load paddle.fluid.io.save(prog, path) self.set_zero(prog, place) @@ -252,7 +252,7 @@ class TestSaveLoadAny(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # test for return tensor path_vars = 'test_replace_save_load_return_tensor_static/model' for var in prog.list_vars(): @@ -285,7 +285,7 @@ class TestSaveLoadAny(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def test_paddle_save_load_v2(self): paddle.disable_static() @@ -329,12 
+329,12 @@ class TestSaveLoadAny(unittest.TestCase): if isinstance(v, dict): self.assertTrue(v == load_dict_tensor[k]) else: - self.assertTrue( - np.array_equal(v.numpy(), load_dict_tensor[k].numpy())) + np.testing.assert_array_equal(v.numpy(), + load_dict_tensor[k].numpy()) if not np.array_equal(v.numpy(), load_dict_np[k]): print(v.numpy()) print(load_dict_np[k]) - self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k])) + np.testing.assert_array_equal(v.numpy(), load_dict_np[k]) def test_single_pickle_var_dygraph(self): # enable dygraph mode @@ -354,14 +354,14 @@ class TestSaveLoadAny(unittest.TestCase): isinstance( t_dygraph, (paddle.fluid.core.VarBase, paddle.fluid.core.eager.Tensor))) - self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph)) - self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy())) + np.testing.assert_array_equal(tensor.numpy(), np_dygraph) + np.testing.assert_array_equal(tensor.numpy(), t_dygraph.numpy()) paddle.enable_static() lod_static = paddle.load(path) np_static = paddle.load(path, return_numpy=True) self.assertTrue(isinstance(lod_static, paddle.fluid.core.LoDTensor)) - self.assertTrue(np.array_equal(tensor.numpy(), np_static)) - self.assertTrue(np.array_equal(tensor.numpy(), np.array(lod_static))) + np.testing.assert_array_equal(tensor.numpy(), np_static) + np.testing.assert_array_equal(tensor.numpy(), np.array(lod_static)) def test_single_pickle_var_static(self): # enable static mode @@ -394,17 +394,17 @@ class TestSaveLoadAny(unittest.TestCase): np_static = paddle.load(path, return_numpy=True) # set_tensor(np.ndarray) var.set_value(np_static, scope) - self.assertTrue(np.array_equal(origin_tensor, np.array(tensor))) + np.testing.assert_array_equal(origin_tensor, np.array(tensor)) # set_tensor(LoDTensor) self.set_zero(prog, place, scope) var.set_value(lod_static, scope) - self.assertTrue(np.array_equal(origin_tensor, np.array(tensor))) + np.testing.assert_array_equal(origin_tensor, np.array(tensor)) # enable dygraph mode paddle.disable_static() var_dygraph = paddle.load(path) np_dygraph = paddle.load(path, return_numpy=True) - self.assertTrue(np.array_equal(np.array(tensor), np_dygraph)) - self.assertTrue(np.array_equal(np.array(tensor), var_dygraph.numpy())) + np.testing.assert_array_equal(np.array(tensor), np_dygraph) + np.testing.assert_array_equal(np.array(tensor), var_dygraph.numpy()) def test_dygraph_save_static_load(self): inps = np.random.randn(1, IMAGE_SIZE).astype('float32') @@ -432,9 +432,8 @@ class TestSaveLoadAny(unittest.TestCase): program.set_state_dict(state_dict) state_dict_param = program.state_dict("param") for name, tensor in state_dict_dy.items(): - self.assertTrue( - np.array_equal(tensor.numpy(), - np.array(state_dict_param[tensor.name]))) + np.testing.assert_array_equal( + tensor.numpy(), np.array(state_dict_param[tensor.name])) def test_save_load_complex_object_dygraph_save(self): paddle.disable_static() @@ -471,58 +470,55 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - self.assertTrue(np.array_equal(load_tensor1[0].numpy(), - obj1[0].numpy())) - self.assertTrue(np.array_equal(load_tensor1[1], obj1[1])) - self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + np.testing.assert_array_equal(load_tensor1[0].numpy(), obj1[0].numpy()) + np.testing.assert_array_equal(load_tensor1[1], obj1[1]) + np.testing.assert_array_equal(load_tensor1[2].numpy(), obj1[2][1]) for i in range(len(load_tensor1)): 
self.assertTrue( type(load_tensor1[i]) == type(load_tensor2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(v.numpy(), load_tensor2['k2'][k].numpy())) + np.testing.assert_array_equal(v.numpy(), + load_tensor2['k2'][k].numpy()) self.assertTrue(load_tensor2['epoch'] == 123) - self.assertTrue(np.array_equal(load_tensor3[0].numpy(), - obj3[0].numpy())) - self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0].numpy()) + np.testing.assert_array_equal(np.array(load_tensor3[1]), obj3[1]) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), - v.numpy())) + np.testing.assert_array_equal( + load_tensor3[2]['state_dict'][k].numpy(), v.numpy()) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_tensor3[2]["opt"][k].numpy(), v.numpy())) + np.testing.assert_array_equal(load_tensor3[2]['opt'][k].numpy(), + v.numpy()) - self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) load_array2 = paddle.load(path2, return_numpy=True) load_array3 = paddle.load(path3, return_numpy=True) load_array4 = paddle.load(path4, return_numpy=True) - self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) - self.assertTrue(np.array_equal(load_array1[1], obj1[1])) - self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + np.testing.assert_array_equal(load_array1[0], obj1[0].numpy()) + np.testing.assert_array_equal(load_array1[1], obj1[1]) + np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + np.testing.assert_array_equal(v.numpy(), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) - self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) - self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + np.testing.assert_array_equal(load_array3[0], obj3[0].numpy()) + np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + np.testing.assert_array_equal(load_array3[2]['state_dict'][k], + v.numpy()) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + np.testing.assert_array_equal(load_array3[2]['opt'][k], v.numpy()) - self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + np.testing.assert_array_equal(load_array4[0], obj4[0]) # static mode paddle.enable_static() @@ -532,69 +528,68 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - self.assertTrue( - np.array_equal(np.array(load_tensor1[0]), obj1[0].numpy())) - self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) - self.assertTrue(np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + np.testing.assert_array_equal(np.array(load_tensor1[0]), + obj1[0].numpy()) + np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) + np.testing.assert_array_equal(np.array(load_tensor1[2]), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( type(load_tensor1[i]) == type(load_tensor2['k1'][i])) for k, v in state_dict.items(): - 
self.assertTrue( - np.array_equal(v.numpy(), np.array(load_tensor2['k2'][k]))) + np.testing.assert_array_equal(v.numpy(), + np.array(load_tensor2['k2'][k])) self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue(isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor)) - self.assertTrue( - np.array_equal(np.array(load_tensor3[0]), obj3[0].numpy())) - self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + np.testing.assert_array_equal(np.array(load_tensor3[0]), + obj3[0].numpy()) + np.testing.assert_array_equal(np.array(load_tensor3[1]), obj3[1]) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["state_dict"][k], paddle.fluid.core.LoDTensor)) - self.assertTrue( - np.array_equal(np.array(load_tensor3[2]["state_dict"][k]), - v.numpy())) + np.testing.assert_array_equal( + np.array(load_tensor3[2]['state_dict'][k]), v.numpy()) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["opt"][k], paddle.fluid.core.LoDTensor)) - self.assertTrue( - np.array_equal(np.array(load_tensor3[2]["opt"][k]), v.numpy())) + np.testing.assert_array_equal(np.array(load_tensor3[2]['opt'][k]), + v.numpy()) self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor) - self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) load_array2 = paddle.load(path2, return_numpy=True) load_array3 = paddle.load(path3, return_numpy=True) load_array4 = paddle.load(path4, return_numpy=True) - self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) - self.assertTrue(np.array_equal(load_array1[1], obj1[1])) - self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + np.testing.assert_array_equal(load_array1[0], obj1[0].numpy()) + np.testing.assert_array_equal(load_array1[1], obj1[1]) + np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + np.testing.assert_array_equal(v.numpy(), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) self.assertTrue(isinstance(load_array3[0], np.ndarray)) - self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) - self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + np.testing.assert_array_equal(load_array3[0], obj3[0].numpy()) + np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + np.testing.assert_array_equal(load_array3[2]['state_dict'][k], + v.numpy()) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + np.testing.assert_array_equal(load_array3[2]['opt'][k], v.numpy()) - self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + np.testing.assert_array_equal(load_array4[0], obj4[0]) def test_save_load_complex_object_static_save(self): paddle.enable_static() @@ -649,72 +644,66 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - self.assertTrue( - np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) - self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) - self.assertTrue( - np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + 
np.testing.assert_array_equal(np.array(load_tensor1[0]), + np.array(obj1[0])) + np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) + np.testing.assert_array_equal(np.array(load_tensor1[2]), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( type(load_tensor1[i]) == type(load_tensor2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(np.array(v), - np.array(load_tensor2['k2'][k]))) + np.testing.assert_array_equal(np.array(v), + np.array(load_tensor2['k2'][k])) self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor)) - self.assertTrue(np.array_equal(np.array(load_tensor3[0]), obj3[0])) + np.testing.assert_array_equal(np.array(load_tensor3[0]), obj3[0]) self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor)) - self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + np.testing.assert_array_equal(np.array(load_tensor3[1]), obj3[1]) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["state_dict"][k], fluid.core.LoDTensor)) - self.assertTrue( - np.array_equal(np.array(load_tensor3[2]["state_dict"][k]), - np.array(v))) + np.testing.assert_array_equal( + np.array(load_tensor3[2]['state_dict'][k]), np.array(v)) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor)) - self.assertTrue( - np.array_equal(np.array(load_tensor3[2]["opt"][k]), - np.array(v))) + np.testing.assert_array_equal( + np.array(load_tensor3[2]['opt'][k]), np.array(v)) self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor)) - self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) load_array2 = paddle.load(path2, return_numpy=True) load_array3 = paddle.load(path3, return_numpy=True) load_array4 = paddle.load(path4, return_numpy=True) - self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) - self.assertTrue(np.array_equal(load_array1[1], obj1[1])) - self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + np.testing.assert_array_equal(load_array1[0], np.array(obj1[0])) + np.testing.assert_array_equal(load_array1[1], obj1[1]) + np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue( type(load_array1[i]) == type(load_array2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(np.array(v), load_array2['k2'][k])) + np.testing.assert_array_equal(np.array(v), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) - self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) - self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + np.testing.assert_array_equal(load_array3[0], np.array(obj3[0])) + np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["state_dict"][k], - np.array(v))) + np.testing.assert_array_equal(load_array3[2]['state_dict'][k], + np.array(v)) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["opt"][k], np.array(v))) + np.testing.assert_array_equal(load_array3[2]['opt'][k], + np.array(v)) - self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + np.testing.assert_array_equal(load_array4[0], obj4[0]) # dygraph mode paddle.disable_static() @@ -724,79 +713,74 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor3 = paddle.load(path3, 
return_numpy=False) load_tensor4 = paddle.load(path4, return_numpy=False) - self.assertTrue( - np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) - self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) - self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + np.testing.assert_array_equal(np.array(load_tensor1[0]), + np.array(obj1[0])) + np.testing.assert_array_equal(np.array(load_tensor1[1]), obj1[1]) + np.testing.assert_array_equal(load_tensor1[2].numpy(), obj1[2][1]) for i in range(len(load_tensor1)): self.assertTrue( type(load_tensor1[i]) == type(load_tensor2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(np.array(v), - np.array(load_tensor2['k2'][k]))) + np.testing.assert_array_equal(np.array(v), + np.array(load_tensor2['k2'][k])) self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue( isinstance(load_tensor3[0], (fluid.core.VarBase, fluid.core.eager.Tensor))) - self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0])) + np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0]) self.assertTrue( isinstance(load_tensor3[1], (fluid.core.VarBase, fluid.core.eager.Tensor))) - self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1])) + np.testing.assert_array_equal(load_tensor3[1].numpy(), obj3[1]) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["state_dict"][k], (fluid.core.VarBase, fluid.core.eager.Tensor))) - self.assertTrue( - np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), - np.array(v))) + np.testing.assert_array_equal( + load_tensor3[2]['state_dict'][k].numpy(), np.array(v)) for k, v in state_dict.items(): self.assertTrue( isinstance(load_tensor3[2]["opt"][k], (fluid.core.VarBase, fluid.core.eager.Tensor))) - self.assertTrue( - np.array_equal(load_tensor3[2]["opt"][k].numpy(), - np.array(v))) + np.testing.assert_array_equal(load_tensor3[2]['opt'][k].numpy(), + np.array(v)) self.assertTrue( isinstance(load_tensor4[0], (fluid.core.VarBase, fluid.core.eager.Tensor))) - self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) load_array2 = paddle.load(path2, return_numpy=True) load_array3 = paddle.load(path3, return_numpy=True) load_array4 = paddle.load(path4, return_numpy=True) - self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) - self.assertTrue(np.array_equal(load_array1[1], obj1[1])) - self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + np.testing.assert_array_equal(load_array1[0], np.array(obj1[0])) + np.testing.assert_array_equal(load_array1[1], obj1[1]) + np.testing.assert_array_equal(load_array1[2], obj1[2][1]) for i in range(len(load_array1)): self.assertTrue( type(load_array1[i]) == type(load_array2['k1'][i])) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(np.array(v), load_array2['k2'][k])) + np.testing.assert_array_equal(np.array(v), load_array2['k2'][k]) self.assertTrue(load_array2['epoch'] == 123) - self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) - self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + np.testing.assert_array_equal(load_array3[0], np.array(obj3[0])) + np.testing.assert_array_equal(load_array3[1], obj3[1]) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["state_dict"][k], - np.array(v))) + np.testing.assert_array_equal(load_array3[2]['state_dict'][k], + np.array(v)) for k, v in 
state_dict.items(): - self.assertTrue( - np.array_equal(load_array3[2]["opt"][k], np.array(v))) + np.testing.assert_array_equal(load_array3[2]['opt'][k], + np.array(v)) self.assertTrue(isinstance(load_array4[0], np.ndarray)) - self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + np.testing.assert_array_equal(load_array4[0], obj4[0]) def test_varbase_binary_var(self): paddle.disable_static() @@ -810,8 +794,8 @@ class TestSaveLoadAny(unittest.TestCase): load_tensor_array = load_tensor.numpy() if paddle.fluid.core.is_compiled_with_cuda(): fluid.core._cuda_synchronize(paddle.CUDAPlace(0)) - self.assertTrue(np.array_equal(origin_array, load_array)) - self.assertTrue(np.array_equal(origin_array, load_tensor_array)) + np.testing.assert_array_equal(origin_array, load_array) + np.testing.assert_array_equal(origin_array, load_tensor_array) class TestSaveLoadToMemory(unittest.TestCase): @@ -828,10 +812,10 @@ class TestSaveLoadToMemory(unittest.TestCase): # load state_dict dict_load = paddle.load(byio, return_numpy=True) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(v.numpy(), dict_load[k])) + np.testing.assert_array_equal(v.numpy(), dict_load[k]) # load tensor tensor_load = paddle.load(byio, return_numpy=True) - self.assertTrue(np.array_equal(tensor_load, tensor.numpy())) + np.testing.assert_array_equal(tensor_load, tensor.numpy()) with self.assertRaises(ValueError): paddle.save(4, 3) @@ -874,11 +858,11 @@ class TestSaveLoadToMemory(unittest.TestCase): prog_load.desc.serialize_to_string()) tensor_load = paddle.load(byio, return_numpy=True) - self.assertTrue(np.array_equal(tensor_load, np.array(tensor))) + np.testing.assert_array_equal(tensor_load, np.array(tensor)) state_dict_load = paddle.load(byio, return_numpy=True) for k, v in state_dict.items(): - self.assertTrue(np.array_equal(np.array(v), state_dict_load[k])) + np.testing.assert_array_equal(np.array(v), state_dict_load[k]) class TestSaveLoad(unittest.TestCase): @@ -915,7 +899,7 @@ class TestSaveLoad(unittest.TestCase): for var_name, value in orig_dict.items(): load_value = load_dict[var_name].numpy() if hasattr( load_dict[var_name], 'numpy') else np.array(load_dict[var_name]) - self.assertTrue(np.array_equal(value.numpy(), load_value)) + np.testing.assert_array_equal(value.numpy(), load_value) def test_save_load(self): layer, opt = self.build_and_train_model() diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py index c7ac11546e1..4357b9925d3 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py @@ -120,7 +120,7 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # test for io.save_vars/replace_load_vars path_vars2 = os.path.join( self.temp_dir.name, @@ -137,7 +137,7 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) def test_save_load_lod_tensor(self): paddle.enable_static() @@ -177,7 +177,7 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): self.assertTrue( list(loaded_tensor.shape()) == [IMAGE_SIZE, OUTPUT_NUM]) to_array = np.array(loaded_tensor) - self.assertTrue(np.array_equal(origin, to_array)) + 
np.testing.assert_array_equal(origin, to_array) with self.assertRaises(NotImplementedError): path = os.path.join(self.temp_dir.name, 'test_save_load_error/temp') @@ -216,7 +216,7 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): # load from memory loaded_tensor_mem = paddle.load(byio) to_array_mem = np.array(loaded_tensor_mem) - self.assertTrue(np.array_equal(np.array(tensor), to_array_mem)) + np.testing.assert_array_equal(np.array(tensor), to_array_mem) with self.assertRaises(NotImplementedError): paddle.framework.io._save_lod_tensor(tensor, 1) @@ -247,8 +247,7 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): self.assertTrue(isinstance(load_sr, fluid.core.SelectedRows)) self.assertTrue(list(load_sr.rows()) == rows) self.assertTrue(load_sr.height() == height) - self.assertTrue(np.array_equal(np.array(load_sr.get_tensor()), - np_array)) + np.testing.assert_array_equal(np.array(load_sr.get_tensor()), np_array) with self.assertRaises(RuntimeError): fluid.core.save_selected_rows( @@ -273,8 +272,8 @@ class TestSaveLoadBinaryFormat(unittest.TestCase): self.assertTrue(isinstance(selected_rows_mem, fluid.core.SelectedRows)) self.assertTrue(list(selected_rows_mem.rows()) == rows) self.assertTrue(selected_rows_mem.height() == height) - self.assertTrue( - np.array_equal(np.array(selected_rows_mem.get_tensor()), np_array)) + np.testing.assert_array_equal(np.array(selected_rows_mem.get_tensor()), + np_array) with self.assertRaises(NotImplementedError): paddle.framework.io._save_selected_rows(selected_rows, 1) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py index 1a015369ec6..0a1b2a5d459 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_isolated_var.py @@ -103,7 +103,7 @@ class TestParallelExecutorFetchIsolatedVarBase(unittest.TestCase): }, fetch_list=[loss, isolated_var]) - self.assertTrue(np.array_equal(y_np, y_np_fetch)) + np.testing.assert_array_equal(y_np, y_np_fetch) enable_parallel_ssa_executor(False) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py index a3a26f481f3..5500f671804 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_inference_feed_partial_data.py @@ -49,12 +49,12 @@ class TestInferencePartialFeed(unittest.TestCase): gen_random = lambda shape: np.random.uniform( low=-1.0, high=1.0, size=shape).astype('float32') - assert_result = lambda feed, result: self.assertTrue( - np.array_equal(np.maximum(0, feed), result)) + assert_result = lambda feed, result: np.testing.assert_array_equal( + np.maximum(0, feed), result) def assert_merged_unmerged(merged, unmerged): unmerged = np.concatenate(unmerged, axis=0) - self.assertTrue(np.array_equal(merged, unmerged)) + np.testing.assert_array_equal(merged, unmerged) def feed_split_test(): for place_num in six.moves.range(1, len(places) * 3): diff --git a/python/paddle/fluid/tests/unittests/test_parameter.py b/python/paddle/fluid/tests/unittests/test_parameter.py index d75a6c0dd90..0115f7fec51 100644 --- a/python/paddle/fluid/tests/unittests/test_parameter.py +++ b/python/paddle/fluid/tests/unittests/test_parameter.py @@ -46,9 +46,9 @@ class 
ParameterChecks(unittest.TestCase): self.assertEqual(0, param.block.idx) exe = Executor(paddle.CPUPlace()) p = exe.run(main_program, fetch_list=[param])[0] - self.assertTrue(np.array_equal(p, np.ones(shape) * val)) + np.testing.assert_array_equal(p, np.ones(shape) * val) p = io.get_parameter_value_by_name('fc.w', exe, main_program) - self.assertTrue(np.array_equal(p, np.ones(shape) * val)) + np.testing.assert_array_equal(p, np.ones(shape) * val) def func_parambase(self): with guard(): @@ -61,7 +61,7 @@ class ParameterChecks(unittest.TestCase): self.assertEqual(param_copy.type, param.type) self.assertEqual(param_copy.dtype, param.dtype) self.assertEqual(str(param_copy.place), str(param.place)) - self.assertTrue(np.array_equal(param_copy.numpy(), param.numpy())) + np.testing.assert_array_equal(param_copy.numpy(), param.numpy()) self.assertEqual(param_copy.optimize_attr, param.optimize_attr) self.assertEqual(param_copy.regularizer, param.regularizer) self.assertEqual(param_copy.do_model_average, @@ -116,12 +116,10 @@ class ParameterChecks(unittest.TestCase): paddle.nn.utils.vector_to_parameters(vec, linear2.parameters()) self.assertEqual(linear2.weight.shape, [10, 15]) self.assertEqual(linear2.bias.shape, [15]) - self.assertTrue( - np.array_equal(linear1.weight.numpy(), linear2.weight.numpy()), - True) - self.assertTrue( - np.array_equal(linear1.bias.numpy(), linear2.bias.numpy()), - True) + np.testing.assert_array_equal(linear1.weight.numpy(), + linear2.weight.numpy()) + np.testing.assert_array_equal(linear1.bias.numpy(), + linear2.bias.numpy()) self.assertTrue(linear2.weight.is_leaf, True) self.assertTrue(linear2.bias.is_leaf, True) diff --git a/python/paddle/fluid/tests/unittests/test_poisson_op.py b/python/paddle/fluid/tests/unittests/test_poisson_op.py index 51f19747f2b..764ba03d401 100644 --- a/python/paddle/fluid/tests/unittests/test_poisson_op.py +++ b/python/paddle/fluid/tests/unittests/test_poisson_op.py @@ -108,7 +108,7 @@ class TestPoissonAPI(unittest.TestCase): y = paddle.poisson(x) y.backward() self.assertTrue(np.min(y.numpy()) >= 0) - self.assertTrue(np.array_equal(np.zeros_like(x), x.gradient())) + np.testing.assert_array_equal(np.zeros_like(x), x.gradient()) def test_fixed_random_number(self): # Test GPU Fixed random number, which is generated by 'curandStatePhilox4_32_10_t' @@ -127,31 +127,31 @@ class TestPoissonAPI(unittest.TestCase): 13., 13., 11., 8., 12., 6., 9., 15., 16., 6., 13., 12., 9., 15., 17., 8., 11., 16., 11., 10. ] - self.assertTrue(np.array_equal(y_np[0, 0, 0, 0:20], expect)) + np.testing.assert_array_equal(y_np[0, 0, 0, 0:20], expect) expect = [ 15., 7., 12., 8., 14., 10., 10., 11., 11., 11., 21., 6., 9., 13., 13., 11., 6., 9., 12., 12. ] - self.assertTrue(np.array_equal(y_np[8, 1, 300, 200:220], expect)) + np.testing.assert_array_equal(y_np[8, 1, 300, 200:220], expect) expect = [ 10., 15., 9., 6., 4., 13., 10., 10., 13., 12., 9., 7., 10., 14., 7., 10., 8., 5., 10., 14. ] - self.assertTrue(np.array_equal(y_np[16, 1, 600, 400:420], expect)) + np.testing.assert_array_equal(y_np[16, 1, 600, 400:420], expect) expect = [ 10., 9., 14., 12., 8., 9., 7., 8., 11., 10., 13., 8., 12., 9., 7., 8., 11., 11., 12., 5. ] - self.assertTrue(np.array_equal(y_np[24, 2, 900, 600:620], expect)) + np.testing.assert_array_equal(y_np[24, 2, 900, 600:620], expect) expect = [ 15., 5., 11., 13., 12., 12., 13., 16., 9., 9., 7., 9., 13., 11., 15., 6., 11., 9., 10., 10. 
] - self.assertTrue(np.array_equal(y_np[31, 2, 1023, 748:768], expect)) + np.testing.assert_array_equal(y_np[31, 2, 1023, 748:768], expect) x = paddle.full([16, 1024, 1024], 5., dtype="float32") y = paddle.poisson(x) @@ -160,31 +160,31 @@ class TestPoissonAPI(unittest.TestCase): 4., 5., 2., 9., 8., 7., 4., 7., 4., 7., 6., 3., 10., 7., 5., 7., 2., 5., 5., 6. ] - self.assertTrue(np.array_equal(y_np[0, 0, 100:120], expect)) + np.testing.assert_array_equal(y_np[0, 0, 100:120], expect) expect = [ 1., 4., 8., 11., 6., 5., 4., 4., 7., 4., 4., 7., 11., 6., 5., 3., 4., 6., 3., 3. ] - self.assertTrue(np.array_equal(y_np[4, 300, 300:320], expect)) + np.testing.assert_array_equal(y_np[4, 300, 300:320], expect) expect = [ 7., 5., 4., 6., 8., 5., 6., 7., 7., 7., 3., 10., 5., 10., 4., 5., 8., 7., 5., 7. ] - self.assertTrue(np.array_equal(y_np[8, 600, 600:620], expect)) + np.testing.assert_array_equal(y_np[8, 600, 600:620], expect) expect = [ 8., 6., 7., 4., 3., 0., 4., 6., 6., 4., 3., 10., 5., 1., 3., 8., 8., 2., 1., 4. ] - self.assertTrue(np.array_equal(y_np[12, 900, 900:920], expect)) + np.testing.assert_array_equal(y_np[12, 900, 900:920], expect) expect = [ 2., 1., 14., 3., 6., 5., 2., 2., 6., 5., 7., 4., 8., 4., 8., 4., 5., 7., 1., 7. ] - self.assertTrue(np.array_equal(y_np[15, 1023, 1000:1020], expect)) + np.testing.assert_array_equal(y_np[15, 1023, 1000:1020], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py index 730a6c1b8a8..180988a2aa5 100644 --- a/python/paddle/fluid/tests/unittests/test_prune.py +++ b/python/paddle/fluid/tests/unittests/test_prune.py @@ -219,8 +219,8 @@ class TestExecutorRunAutoPrune(unittest.TestCase): self.assertIsNone(scope.find_var(loss2.name)) #loss2 is pruned weight = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_init, - weight)) # weight not changed + np.testing.assert_array_equal(weight_init, + weight) # weight not changed def test_prune_fetches_with_optimizer(self): """ @@ -311,8 +311,8 @@ class TestExecutorRunAutoPrune(unittest.TestCase): self.assertIsNone(scope.find_var(loss2.name)) weight = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_init, - weight)) # weight unchanged + np.testing.assert_array_equal(weight_init, + weight) # weight unchanged def test_prune_feed_with_optimizer(self): program = framework.Program() @@ -527,7 +527,7 @@ class TestExecutorRunAutoPrune(unittest.TestCase): weight_expected = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_with_prune, weight_expected)) + np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) def test_prune_with_multi_devices(self): @@ -661,7 +661,7 @@ class TestExecutorRunAutoPrune(unittest.TestCase): weight_expected = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_with_prune, weight_expected)) + np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) def test_prune_program_partial_parameter_updated(self): @@ -708,8 +708,8 @@ class TestExecutorRunAutoPrune(unittest.TestCase): scope.find_var(w2_param_attrs.name).get_tensor()) self.assertFalse(np.array_equal(weight1_init, weight1)) # weight changed - self.assertTrue(np.array_equal(weight2_init, - weight2)) # 
weight2 unchanged + np.testing.assert_array_equal(weight2_init, + weight2) # weight2 unchanged def test_prune_override_use_prune(self): ''' @@ -768,7 +768,7 @@ class TestExecutorRunAutoPrune(unittest.TestCase): weight_expected = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_with_prune, weight_expected)) + np.testing.assert_array_equal(weight_with_prune, weight_expected) self.assertFalse(np.array_equal(weight_without_prune, weight_expected)) def test_prune_feed_var_in_fetchlist_1(self): @@ -797,8 +797,8 @@ class TestExecutorRunAutoPrune(unittest.TestCase): self.assertIsNone(scope.find_var(x.name)) weight = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_init, - weight)) # weight unchanged + np.testing.assert_array_equal(weight_init, + weight) # weight unchanged def test_prune_feed_var_in_fetchlist_2(self): # the variable to be fed is leaf @@ -825,8 +825,8 @@ class TestExecutorRunAutoPrune(unittest.TestCase): self.assertIsNone(scope.find_var(loss2.name)) weight = np.array( scope.find_var(w_param_attrs.name).get_tensor()) - self.assertTrue(np.array_equal(weight_init, - weight)) # weight unchanged + np.testing.assert_array_equal(weight_init, + weight) # weight unchanged if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py index d3bc50bffe7..84ea5f5f7bb 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py @@ -46,8 +46,8 @@ class TestPyReaderCombination(unittest.TestCase): image2 = np.array(py_reader_dict_data[0]['image']) label2 = np.array(py_reader_dict_data[0]['label']) - self.assertTrue(np.array_equal(image1, image2)) - self.assertTrue(np.array_equal(label1, label2)) + np.testing.assert_array_equal(image1, image2) + np.testing.assert_array_equal(label1, label2) # FIXME(zjl): do not know why Python 35 would raise SIGABRT if not reset reader # manually. 
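Note on why this mechanical rewrite is worth making: self.assertTrue(np.array_equal(a, b)) collapses the array comparison into a single boolean, so a failing test reports only "AssertionError: False is not true". np.testing.assert_array_equal(a, b) instead raises an AssertionError that counts the mismatched elements and prints both arrays. A minimal sketch of the difference, using illustrative arrays that are not taken from any test in this patch:

    import numpy as np

    actual = np.array([1, 2, 4])  # illustrative values only
    expected = np.array([1, 2, 3])

    # unittest style: on failure the message is just "False is not true"
    #     self.assertTrue(np.array_equal(actual, expected))

    # numpy style: the AssertionError describes the mismatch
    try:
        np.testing.assert_array_equal(actual, expected)
    except AssertionError as e:
        # prints, among other details, a line such as
        #     Mismatched elements: 1 / 3 (33.3%)
        # followed by a dump of both arrays (output abridged here)
        print(e)

The same API difference explains the message conversions elsewhere in this patch: assertTrue takes a custom failure message as its msg argument, while np.testing.assert_array_equal takes it as the err_msg keyword, which is why msg=... becomes err_msg=... in the test_ops_nms and test_set_value_op hunks.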
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py index f5d18a9268f..c3b4d02af1d 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_op.py +++ b/python/paddle/fluid/tests/unittests/test_randint_op.py @@ -232,22 +232,22 @@ class TestRandomValue(unittest.TestCase): self.assertTrue(x.mean(), -0.7517569760481516) self.assertTrue(x.std(), 5773.696619107639) expect = [2535, 2109, 5916, -5011, -261] - self.assertTrue(np.array_equal(x[10, 0, 100, 100:105], expect)) + np.testing.assert_array_equal(x[10, 0, 100, 100:105], expect) expect = [3465, 7206, -8660, -9628, -6574] - self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect)) + np.testing.assert_array_equal(x[20, 1, 600, 600:605], expect) expect = [881, 1560, 1100, 9664, 1669] - self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect)) + np.testing.assert_array_equal(x[30, 2, 1000, 1000:1005], expect) x = paddle.randint(-10000, 10000, [32, 3, 1024, 1024], dtype='int64').numpy() self.assertTrue(x.mean(), -1.461287518342336) self.assertTrue(x.std(), 5773.023477548159) expect = [7213, -9597, 754, 8129, -1158] - self.assertTrue(np.array_equal(x[10, 0, 100, 100:105], expect)) + np.testing.assert_array_equal(x[10, 0, 100, 100:105], expect) expect = [-7159, 8054, 7675, 6980, 8506] - self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect)) + np.testing.assert_array_equal(x[20, 1, 600, 600:605], expect) expect = [3581, 3420, -8027, -5237, -2436] - self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect)) + np.testing.assert_array_equal(x[30, 2, 1000, 1000:1005], expect) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_randperm_op.py b/python/paddle/fluid/tests/unittests/test_randperm_op.py index 5a75e839397..efbb12353da 100644 --- a/python/paddle/fluid/tests/unittests/test_randperm_op.py +++ b/python/paddle/fluid/tests/unittests/test_randperm_op.py @@ -173,61 +173,61 @@ class TestRandomValue(unittest.TestCase): expect = [ 24562, 8409, 9379, 10328, 20503, 18059, 9681, 21883, 11783, 27413 ] - self.assertTrue(np.array_equal(x[0:10], expect)) + np.testing.assert_array_equal(x[0:10], expect) expect = [ 29477, 27100, 9643, 16637, 8605, 16892, 27767, 2724, 1612, 13096 ] - self.assertTrue(np.array_equal(x[10000:10010], expect)) + np.testing.assert_array_equal(x[10000:10010], expect) expect = [ 298, 4104, 16479, 22714, 28684, 7510, 14667, 9950, 15940, 28343 ] - self.assertTrue(np.array_equal(x[20000:20010], expect)) + np.testing.assert_array_equal(x[20000:20010], expect) x = paddle.randperm(30000, dtype='int64').numpy() expect = [ 6587, 1909, 5525, 23001, 6488, 14981, 14355, 3083, 29561, 8171 ] - self.assertTrue(np.array_equal(x[0:10], expect)) + np.testing.assert_array_equal(x[0:10], expect) expect = [ 23460, 12394, 22501, 5427, 20185, 9100, 5127, 1651, 25806, 4818 ] - self.assertTrue(np.array_equal(x[10000:10010], expect)) + np.testing.assert_array_equal(x[10000:10010], expect) expect = [5829, 4508, 16193, 24836, 8526, 242, 9984, 9243, 1977, 11839] - self.assertTrue(np.array_equal(x[20000:20010], expect)) + np.testing.assert_array_equal(x[20000:20010], expect) x = paddle.randperm(30000, dtype='float32').numpy() expect = [ 5154., 10537., 14362., 29843., 27185., 28399., 27561., 4144., 22906., 10705. ] - self.assertTrue(np.array_equal(x[0:10], expect)) + np.testing.assert_array_equal(x[0:10], expect) expect = [ 1958., 18414., 20090., 21910., 22746., 27346., 22347., 3002., 4564., 26991. 
] - self.assertTrue(np.array_equal(x[10000:10010], expect)) + np.testing.assert_array_equal(x[10000:10010], expect) expect = [ 25580., 12606., 553., 16387., 29536., 4241., 20946., 16899., 16339., 4662. ] - self.assertTrue(np.array_equal(x[20000:20010], expect)) + np.testing.assert_array_equal(x[20000:20010], expect) x = paddle.randperm(30000, dtype='float64').numpy() expect = [ 19051., 2449., 21940., 11121., 282., 7330., 13747., 24321., 21147., 9163. ] - self.assertTrue(np.array_equal(x[0:10], expect)) + np.testing.assert_array_equal(x[0:10], expect) expect = [ 15483., 1315., 5723., 20954., 13251., 25539., 5074., 1823., 14945., 17624. ] - self.assertTrue(np.array_equal(x[10000:10010], expect)) + np.testing.assert_array_equal(x[10000:10010], expect) expect = [ 10516., 2552., 29970., 5941., 986., 8007., 24805., 26753., 12202., 21404. ] - self.assertTrue(np.array_equal(x[20000:20010], expect)) + np.testing.assert_array_equal(x[20000:20010], expect) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_real_imag_op.py b/python/paddle/fluid/tests/unittests/test_real_imag_op.py index 1402585c037..f3226158ba3 100644 --- a/python/paddle/fluid/tests/unittests/test_real_imag_op.py +++ b/python/paddle/fluid/tests/unittests/test_real_imag_op.py @@ -119,7 +119,7 @@ class TestRealAPI(unittest.TestCase): exe = static.Executor(place) out_value = exe.run(feed=input_dict, fetch_list=[out.name]) - self.assertTrue(np.array_equal(np_res, out_value[0])) + np.testing.assert_array_equal(np_res, out_value[0]) def test_in_dynamic_mode(self): for dtype in self.dtypes: @@ -131,10 +131,10 @@ class TestRealAPI(unittest.TestCase): with fluid.dygraph.guard(place): input_t = paddle.to_tensor(input) res = paddle_apis[self.api](input_t).numpy() - self.assertTrue(np.array_equal(np_res, res)) + np.testing.assert_array_equal(np_res, res) res_t = input_t.real().numpy( ) if self.api is "real" else input_t.imag().numpy() - self.assertTrue(np.array_equal(np_res, res_t)) + np.testing.assert_array_equal(np_res, res_t) def test_name_argument(self): with static.program_guard(static.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_reverse_op.py b/python/paddle/fluid/tests/unittests/test_reverse_op.py index adc9e513eaa..57b87bd8963 100644 --- a/python/paddle/fluid/tests/unittests/test_reverse_op.py +++ b/python/paddle/fluid/tests/unittests/test_reverse_op.py @@ -181,10 +181,10 @@ class TestReverseLoDTensorArray(unittest.TestCase): arr_len = len(res) - 1 reversed_array = res[-1] # check output - self.assertTrue(np.array_equal(gt, reversed_array)) + np.testing.assert_array_equal(gt, reversed_array) # check grad for i in range(arr_len): - self.assertTrue(np.array_equal(res[i], np.ones_like(res[i]))) + np.testing.assert_array_equal(res[i], np.ones_like(res[i])) def test_raise_error(self): # The len(axis) should be 1 is input(X) is LoDTensorArray diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py index f00b5fdc436..0beee7b0a1c 100644 --- a/python/paddle/fluid/tests/unittests/test_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_scale_op.py @@ -215,7 +215,7 @@ class TestScaleApiStatic(unittest.TestCase): exe = paddle.static.Executor(place=paddle.CPUPlace()) out = exe.run(main_prog, feed={"x": input}, fetch_list=[out]) - self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out[0], input * 2.0 + 3.0) class TestScaleInplaceApiStatic(TestScaleApiStatic): @@ -234,7 +234,7 @@ class 
TestScaleApiDygraph(unittest.TestCase): input = np.random.random([2, 25]).astype("float32") x = paddle.to_tensor(input) out = self._executed_api(x, scale=2.0, bias=3.0) - self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py index 1833f36013d..ecf67c71cbd 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py @@ -244,8 +244,7 @@ class TestScatterNdOpAPI(unittest.TestCase): cpu_value = paddle.scatter_nd_add(paddle.to_tensor(x), paddle.to_tensor(index), paddle.to_tensor(val)) - self.assertTrue(np.array_equal(gpu_value.numpy(), - cpu_value.numpy())) + np.testing.assert_array_equal(gpu_value.numpy(), cpu_value.numpy()) paddle.set_device(device) @switch_to_static_graph @@ -267,7 +266,7 @@ class TestScatterNdOpAPI(unittest.TestCase): gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0] cpu_exe = paddle.static.Executor(paddle.CPUPlace()) cpu_value = cpu_exe.run(feed=feed, fetch_list=fetch)[0] - self.assertTrue(np.array_equal(gpu_value, cpu_value)) + np.testing.assert_array_equal(gpu_value, cpu_value) test_static_graph() diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py index 2fe162d8090..d54ed142178 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py @@ -289,7 +289,7 @@ class TestScatterAPI(unittest.TestCase): gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0] return gpu_value - self.assertTrue(np.array_equal(test_dygraph(), test_static_graph())) + np.testing.assert_array_equal(test_dygraph(), test_static_graph()) @unittest.skipIf(not core.is_compiled_with_cuda(), diff --git a/python/paddle/fluid/tests/unittests/test_set_value_op.py b/python/paddle/fluid/tests/unittests/test_set_value_op.py index 9aee71af416..59ccff3973f 100644 --- a/python/paddle/fluid/tests/unittests/test_set_value_op.py +++ b/python/paddle/fluid/tests/unittests/test_set_value_op.py @@ -1133,13 +1133,15 @@ class TestGradientTruncated(unittest.TestCase): [[2916., 4000., 5324., 6912.]]]], [[[[0., 0., 0., 0.]], [[0., 0., 0., 0.]], [[0., 0., 0., 0.]]]]]]) - self.assertTrue( - np.array_equal(inps.grad.numpy(), input_grad), - msg="The gradient of value should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + inps.grad.numpy(), + input_grad, + err_msg='The gradient of value should be \n{},\n but received {}'. format(input_grad, inps.grad.numpy())) - self.assertTrue( - np.array_equal(value.grad.numpy(), value_grad), - msg="The gradient of input should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + value.grad.numpy(), + value_grad, + err_msg='The gradient of input should be \n{},\n but received {}'. format(value_grad, value.grad.numpy())) # case 2 @@ -1159,13 +1161,15 @@ class TestGradientTruncated(unittest.TestCase): [16384., 19652., 23328.]], [[27436., 32000., 37044.], [42592., 48668., 55296.]]]) - self.assertTrue( - np.array_equal(inps2.grad.numpy(), input_grad2), - msg="The gradient of value should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + inps2.grad.numpy(), + input_grad2, + err_msg='The gradient of value should be \n{},\n but received {}'.
format(input_grad, inps2.grad.numpy())) - self.assertTrue( - np.array_equal(value2.grad.numpy(), value_grad2), - msg="The gradient of input should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + value2.grad.numpy(), + value_grad2, + err_msg='The gradient of input should be \n{},\n but reveived {}'. format(value_grad, value2.grad.numpy())) # case 3 @@ -1196,13 +1200,15 @@ class TestGradientTruncated(unittest.TestCase): [[[[[27436.], [32000.]]]], [[[[37044.], [42592.]]]], [[[[48668.], [55296.]]]]]]) - self.assertTrue( - np.array_equal(inps.grad.numpy(), input_grad), - msg="The gradient of value should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + inps.grad.numpy(), + input_grad, + err_msg='The gradient of value should be \n{},\n but reveived {}'. format(input_grad, inps.grad.numpy())) - self.assertTrue( - np.array_equal(value.grad.numpy(), value_grad), - msg="The gradient of input should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + value.grad.numpy(), + value_grad, + err_msg='The gradient of input should be \n{},\n but reveived {}'. format(value_grad, value.grad.numpy())) #case 4: step >0 @@ -1229,13 +1235,15 @@ class TestGradientTruncated(unittest.TestCase): [[[[8788.], [10976.], [13500.], [16384.]]], [[[19652.], [23328.], [27436.], [32000.]]], [[[37044.], [42592.], [48668.], [55296.]]]]]) - self.assertTrue( - np.array_equal(inps.grad.numpy(), input_grad), - msg="The gradient of value should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + inps.grad.numpy(), + input_grad, + err_msg='The gradient of value should be \n{},\n but reveived {}'. format(input_grad, inps.grad.numpy())) - self.assertTrue( - np.array_equal(value.grad.numpy(), value_grad), - msg="The gradient of input should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + value.grad.numpy(), + value_grad, + err_msg='The gradient of input should be \n{},\n but reveived {}'. format(value_grad, value.grad.numpy())) # case 5:a[0].shape==value.shape @@ -1262,13 +1270,15 @@ class TestGradientTruncated(unittest.TestCase): [[8788., 10976., 13500., 16384.], [19652., 23328., 27436., 32000.], [37044., 42592., 48668., 55296.]]]) - self.assertTrue( - np.array_equal(inps.grad.numpy(), input_grad), - msg="The gradient of value should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + inps.grad.numpy(), + input_grad, + err_msg='The gradient of value should be \n{},\n but reveived {}'. format(input_grad, inps.grad.numpy())) - self.assertTrue( - np.array_equal(value.grad.numpy(), value_grad), - msg="The gradient of input should be \n{},\n but reveived {}". + np.testing.assert_array_equal( + value.grad.numpy(), + value_grad, + err_msg='The gradient of input should be \n{},\n but reveived {}'. 
format(value_grad, value.grad.numpy())) # case 6: pass stop_gradient from value to x @@ -1472,7 +1482,7 @@ class TestSetValueInplace(unittest.TestCase): b[paddle.to_tensor(0)] = 1.0 self.assertTrue(id(b) == id(c)) - self.assertTrue(np.array_equal(b.numpy(), c.numpy())) + np.testing.assert_array_equal(b.numpy(), c.numpy()) self.assertEqual(b.inplace_version, 1) paddle.enable_static() @@ -1510,8 +1520,8 @@ class TestSetValueInplaceLeafVar(unittest.TestCase): a_grad_2 = a.grad.numpy() b_grad_2 = b.grad.numpy() - self.assertTrue(np.array_equal(a_grad_1, a_grad_2)) - self.assertTrue(np.array_equal(b_grad_1, b_grad_2)) + np.testing.assert_array_equal(a_grad_1, a_grad_2) + np.testing.assert_array_equal(b_grad_1, b_grad_2) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py b/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py index 6292a4d2b51..962acad43ed 100644 --- a/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py +++ b/python/paddle/fluid/tests/unittests/test_shuffle_batch_op.py @@ -68,7 +68,7 @@ class TestShuffleBatchOpBase(OpTest): assert y is not None sort_x = self.sort_array(x) sort_y = self.sort_array(y) - self.assertTrue(np.array_equal(sort_x, sort_y)) + np.testing.assert_array_equal(sort_x, sort_y) def sort_array(self, array): shape = array.shape diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index 41a4d35d906..ddf0af21cdd 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -600,7 +600,7 @@ class TestSliceApiWithTensor(unittest.TestCase): ends=paddle.to_tensor(ends, dtype='int32')) a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) - self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) + np.testing.assert_array_equal(a_1.numpy(), a_2.numpy()) def test_bool_tensor(self): with paddle.fluid.dygraph.guard(): @@ -616,7 +616,7 @@ class TestSliceApiWithTensor(unittest.TestCase): y_np = tt[0:3, 1:5, 2:4] self.assertTrue(paddle.bool == y_paddle.dtype) - self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) + np.testing.assert_array_equal(y_paddle.numpy(), y_np) class TestSliceApiEager(unittest.TestCase): @@ -635,11 +635,11 @@ class TestSliceApiEager(unittest.TestCase): axes=axes, starts=paddle.to_tensor(starts), ends=paddle.to_tensor(ends)) - self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) + np.testing.assert_array_equal(a_1.numpy(), a_2.numpy()) a_1.backward() grad_truth = paddle.zeros_like(a) grad_truth[-3:3, 0:2, 2:4] = 1 - self.assertTrue(np.array_equal(grad_truth, a.gradient())) + np.testing.assert_array_equal(grad_truth, a.gradient()) self.assertTrue(np.allclose(a_1.numpy(), a[-3:3, 0:2, 2:4])) @@ -710,10 +710,10 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue(np.array_equal(self.out, self.data)) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + np.testing.assert_array_equal(self.out, self.data) + np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) + np.testing.assert_array_equal(self.g_x1, np.zeros_like(self.data)) + np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data)) def test_case_2(self): main_program = 
fluid.Program() @@ -722,12 +722,11 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.assertTrue( self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue( - np.array_equal(self.out, - np.stack([self.data, self.data], axis=self.axis))) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + np.testing.assert_array_equal( + self.out, np.stack([self.data, self.data], axis=self.axis)) + np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) + np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) + np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data)) def test_case_3(self): main_program = fluid.Program() @@ -736,13 +735,12 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): self.assertTrue( self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue( - np.array_equal( - self.out, - np.stack([self.data, self.data, self.data], axis=self.axis))) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) + np.testing.assert_array_equal( + self.out, np.stack([self.data, self.data, self.data], + axis=self.axis)) + np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data)) + np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data)) + np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data)) class TestImperativeVarBaseGetItem(unittest.TestCase): @@ -796,11 +794,11 @@ class TestInferShape(unittest.TestCase): 100, ], [0], [1]) np_slice = x_arr[:, :, 0:1] - self.assertTrue(np.array_equal(pp_slice, np_slice)) + np.testing.assert_array_equal(pp_slice, np_slice) pp_slice = paddle.slice(x, (-100, ), [0], [1]) np_slice = x_arr[0:1] - self.assertTrue(np.array_equal(pp_slice, np_slice)) + np.testing.assert_array_equal(pp_slice, np_slice) x_arr = np.array([], dtype=np.float32) x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 4fb33e53baf..2c71ada5612 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -511,7 +511,7 @@ class API_TestDygraphSplit(unittest.TestCase): num1 = paddle.full(shape=[1], fill_value=1, dtype='int32') x0 = paddle.split(input, num_or_sections=[-1], axis=num1) x0_out = x0[0].numpy() - self.assertTrue(np.array_equal(x0_out, input.numpy())) + np.testing.assert_array_equal(x0_out, input.numpy()) def test_negative_one_section(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_split_program.py b/python/paddle/fluid/tests/unittests/test_split_program.py index ff8348eb719..7ed33ebde4c 100644 --- a/python/paddle/fluid/tests/unittests/test_split_program.py +++ b/python/paddle/fluid/tests/unittests/test_split_program.py @@ -66,8 +66,10 @@ class TestSplitProgram(unittest.TestCase): self.assertEqual(len(vars_actual), len(vars_expected)) for actual, expected in zip(vars_actual, vars_expected): self.assertEqual(actual.shape, expected.shape) - self.assertTrue(np.array_equal(actual, expected), - '{}\n{}\n'.format(actual, expected)) + 
np.testing.assert_array_equal(actual, + expected, + err_msg='{}\n{}\n'.format( + actual, expected)) def get_places(self): places = [paddle.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py index 1c28393f330..8c4131d71d0 100644 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py @@ -62,7 +62,7 @@ class TestL2LossDeterministic(unittest.TestCase): x = paddle.to_tensor(x_np) y1 = _C_ops.squared_l2_norm(x) y2 = _C_ops.squared_l2_norm(x) - self.assertTrue(np.array_equal(y1.numpy(), y2.numpy())) + np.testing.assert_array_equal(y1.numpy(), y2.numpy()) def test_main(self): self.check_place(paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py index 6f4e490be6b..5bd9e2634a0 100644 --- a/python/paddle/fluid/tests/unittests/test_stack_op.py +++ b/python/paddle/fluid/tests/unittests/test_stack_op.py @@ -174,9 +174,8 @@ class TestStackAPIWithLoDTensorArray(unittest.TestCase): self.assertTrue(self.out_var.shape[self.axis] == -1) exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) - self.assertTrue( - np.array_equal(res[0], - np.stack([self.x] * self.iter_num, axis=self.axis))) + np.testing.assert_array_equal( + res[0], np.stack([self.x] * self.iter_num, axis=self.axis)) class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): @@ -209,9 +208,8 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): self.assertTrue(self.out_var.shape[self.axis] == -1) exe = fluid.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) - self.assertTrue( - np.array_equal(res[0], - np.stack([self.x] * self.iter_num, axis=self.axis))) + np.testing.assert_array_equal( + res[0], np.stack([self.x] * self.iter_num, axis=self.axis)) class API_test(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load.py b/python/paddle/fluid/tests/unittests/test_static_save_load.py index 8a4f8f92013..c47daba9684 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load.py @@ -332,7 +332,7 @@ class TestSaveLoadBase(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() @@ -450,7 +450,7 @@ class TestSaveLoadPartial(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) fluid.load(test_program, os.path.join(temp_dir.name, "test_1.pdmodel"), None) temp_dir.cleanup() @@ -561,7 +561,7 @@ class TestSaveLoadSetStateDict(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() @@ -691,7 +691,7 @@ class TestProgramStatePartial(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # check 1 for var in main_program.list_vars(): @@ -711,7 
+711,7 @@ class TestProgramStatePartial(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # check 2 for var in main_program.list_vars(): @@ -731,7 +731,7 @@ class TestProgramStatePartial(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # check 3 for var in main_program.list_vars(): @@ -751,7 +751,7 @@ class TestProgramStatePartial(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() @@ -840,7 +840,7 @@ class TestVariableInit(unittest.TestCase): new_t = np.array(new_scope.find_var(var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() @@ -966,7 +966,7 @@ class TestLoadFromOldInterface(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: @@ -1096,7 +1096,7 @@ class TestLoadFromOldInterface(unittest.TestCase): if var.name in var_list_names: # loaded vars base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) else: #not loaded vars self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1213,7 +1213,7 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # test exception # change shape @@ -1400,7 +1400,7 @@ class TestProgramStateOldSave(unittest.TestCase): with fluid.dygraph.guard(place): load_state = fluid.load_program_state(save_dir) for k, v in load_state.items(): - self.assertTrue(np.array_equal(base_map[k], v)) + np.testing.assert_array_equal(base_map[k], v) def create_symlink(self, target, link_name): try: @@ -1416,7 +1416,7 @@ class TestProgramStateOldSave(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) class TestProgramStateOldSaveSingleModel(unittest.TestCase): @@ -1540,7 +1540,7 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) with self.assertRaises(ValueError): fluid.load_program_state(os.path.join(save_dir, "model_1")) @@ -1623,7 +1623,7 @@ class TestStaticSaveLoadPickle(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py index 6da849a44bd..dc46578b049 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py @@ -141,7 +141,7 @@ class TestSaveLoadBF16(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load_large.py b/python/paddle/fluid/tests/unittests/test_static_save_load_large.py index e45cd59b444..0a417df56c3 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load_large.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load_large.py @@ -75,7 +75,7 @@ class TestStaticSaveLoadLargeParameters(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) # set var to zero for var in prog.list_vars(): @@ -94,7 +94,7 @@ class TestStaticSaveLoadLargeParameters(unittest.TestCase): new_t = np.array(fluid.global_scope().find_var( var.name).get_tensor()) base_t = base_map[var.name] - self.assertTrue(np.array_equal(new_t, base_t)) + np.testing.assert_array_equal(new_t, base_t) temp_dir.cleanup() diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index e8d42a2fae8..b68fbd9468c 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -748,10 +748,11 @@ class TestStridedSliceTensorArray(unittest.TestCase): net.clear_all_grad() # compare result of dygraph and static self.is_grads_equal(grads_static, grads_dy) - self.assertTrue( - np.array_equal(s1, s2), - msg="dygraph graph result:\n{} \nstatic dygraph result:\n{}".format( - l1.numpy(), l2.numpy())) + np.testing.assert_array_equal( + s1, + s2, + err_msg='dygraph graph result:\n{} \nstatic dygraph result:\n{}'. 
+ format(l1.numpy(), l2.numpy())) def test_strided_slice_tensor_array_cuda_pinned_place(self): if paddle.device.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index ad226878f7e..2d2bc8487ca 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -112,11 +112,9 @@ class TestSelectedRowsSumOp(unittest.TestCase): if has_data_w_num > 0: self.assertEqual(len(out.rows()), 7) - self.assertTrue( - np.array_equal( - np.array(out.get_tensor()), - self._get_array(self.rows, self.row_numel) * - has_data_w_num)) + np.testing.assert_array_equal( + np.array(out.get_tensor()), + self._get_array(self.rows, self.row_numel) * has_data_w_num) else: self.assertEqual(len(out.rows()), 0) @@ -252,13 +250,10 @@ class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp): out_t = np.array(out) self.assertEqual(out_t.shape[0], self.height) - self.assertTrue( - np.array_equal( - out_t, - self._get_array([i - for i in range(self.height)], self.row_numel) * - np.tile( - np.array(result).reshape(self.height, 1), self.row_numel))) + np.testing.assert_array_equal( + out_t, + self._get_array([i for i in range(self.height)], self.row_numel) * + np.tile(np.array(result).reshape(self.height, 1), self.row_numel)) def create_lod_tensor(self, scope, place, var_name): var = scope.var(var_name) diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py index 2ea88c89a37..94db0cfe067 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor.py @@ -17,7 +17,7 @@ from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.core as core import unittest -import numpy +import numpy as np import numbers @@ -25,7 +25,7 @@ class TestTensorPtr(unittest.TestCase): def test_tensor_ptr(self): t = core.Tensor() - np_arr = numpy.zeros([2, 3]) + np_arr = np.zeros([2, 3]) t.set(np_arr, core.CPUPlace()) self.assertGreater(t._ptr(), 0) @@ -47,13 +47,13 @@ class TestTensor(unittest.TestCase): tensor._set_dims([1000, 784]) tensor._alloc_int(place) - tensor_array = numpy.array(tensor) + tensor_array = np.array(tensor) self.assertEqual((1000, 784), tensor_array.shape) tensor_array[3, 9] = 1 tensor_array[19, 11] = 2 tensor.set(tensor_array, place) - tensor_array_2 = numpy.array(tensor) + tensor_array_2 = np.array(tensor) self.assertEqual(1, tensor_array_2[3, 9]) self.assertEqual(2, tensor_array_2[19, 11]) @@ -67,13 +67,13 @@ class TestTensor(unittest.TestCase): tensor._set_dims([1000, 784]) tensor._alloc_float(place) - tensor_array = numpy.array(tensor) + tensor_array = np.array(tensor) self.assertEqual((1000, 784), tensor_array.shape) tensor_array[3, 9] = 1.0 tensor_array[19, 11] = 2.0 tensor.set(tensor_array, place) - tensor_array_2 = numpy.array(tensor) + tensor_array_2 = np.array(tensor) self.assertAlmostEqual(1.0, tensor_array_2[3, 9]) self.assertAlmostEqual(2.0, tensor_array_2[19, 11]) @@ -81,24 +81,24 @@ class TestTensor(unittest.TestCase): scope = core.Scope() var = scope.var("int8_tensor") cpu_tensor = var.get_tensor() - tensor_array = numpy.random.randint(-127, - high=128, - size=[100, 200], - dtype=numpy.int8) + tensor_array = np.random.randint(-127, + high=128, + size=[100, 200], + dtype=np.int8) place = core.CPUPlace() cpu_tensor.set(tensor_array, place) - cpu_tensor_array_2 = numpy.array(cpu_tensor) + cpu_tensor_array_2 = 
np.array(cpu_tensor) self.assertAlmostEqual(cpu_tensor_array_2.all(), tensor_array.all()) if core.is_compiled_with_cuda(): cuda_tensor = var.get_tensor() - tensor_array = numpy.random.randint(-127, - high=128, - size=[100, 200], - dtype=numpy.int8) + tensor_array = np.random.randint(-127, + high=128, + size=[100, 200], + dtype=np.int8) place = core.CUDAPlace(0) cuda_tensor.set(tensor_array, place) - cuda_tensor_array_2 = numpy.array(cuda_tensor) + cuda_tensor_array_2 = np.array(cuda_tensor) self.assertAlmostEqual(cuda_tensor_array_2.all(), tensor_array.all()) @@ -110,14 +110,14 @@ class TestTensor(unittest.TestCase): lod_tensor._set_dims([4, 4, 6]) lod_tensor._alloc_int(place) - array = numpy.array(lod_tensor) + array = np.array(lod_tensor) array[0, 0, 0] = 3 array[3, 3, 5] = 10 lod_tensor.set(array, place) lod_tensor.set_recursive_sequence_lengths([[2, 2]]) - lod_v = numpy.array(lod_tensor) - self.assertTrue(numpy.alltrue(array == lod_v)) + lod_v = np.array(lod_tensor) + self.assertTrue(np.alltrue(array == lod_v)) lod = lod_tensor.recursive_sequence_lengths() self.assertEqual(2, lod[0][0]) @@ -132,13 +132,13 @@ class TestTensor(unittest.TestCase): lod_tensor._set_dims([5, 2, 3, 4]) lod_tensor._alloc_float(place) - tensor_array = numpy.array(lod_tensor) + tensor_array = np.array(lod_tensor) self.assertEqual((5, 2, 3, 4), tensor_array.shape) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 lod_tensor.set(tensor_array, place) - lod_v = numpy.array(lod_tensor) + lod_v = np.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) self.assertEqual(len(lod_tensor.recursive_sequence_lengths()), 0) @@ -156,12 +156,12 @@ class TestTensor(unittest.TestCase): lod_tensor._set_dims([5, 2, 3, 4]) lod_tensor.set_recursive_sequence_lengths(lod_py) lod_tensor._alloc_float(place) - tensor_array = numpy.array(lod_tensor) + tensor_array = np.array(lod_tensor) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 lod_tensor.set(tensor_array, place) - lod_v = numpy.array(lod_tensor) + lod_v = np.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths()) @@ -176,12 +176,12 @@ class TestTensor(unittest.TestCase): lod_tensor._set_dims([5, 2, 3, 4]) lod_tensor.set_recursive_sequence_lengths(lod_py) lod_tensor._alloc_float(place) - tensor_array = numpy.array(lod_tensor) + tensor_array = np.array(lod_tensor) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 lod_tensor.set(tensor_array, place) - lod_v = numpy.array(lod_tensor) + lod_v = np.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths()) @@ -195,13 +195,13 @@ class TestTensor(unittest.TestCase): tensor._set_dims([0, 1]) tensor._alloc_float(place) - tensor_array = numpy.array(tensor) + tensor_array = np.array(tensor) self.assertEqual((0, 1), tensor_array.shape) if core.is_compiled_with_cuda(): gpu_place = core.CUDAPlace(0) tensor._alloc_float(gpu_place) - tensor_array = numpy.array(tensor) + tensor_array = np.array(tensor) self.assertEqual((0, 1), tensor_array.shape) def run_slice_tensor(self, place, dtype): @@ -209,43 +209,43 @@ class TestTensor(unittest.TestCase): shape = [3, 3, 3] tensor._set_dims(shape) - tensor_array = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], - [[10, 11, 12], [13, 14, 15], [16, 17, 18]], - [[19, 20, 
21], [22, 23, 24], - [25, 26, 27]]]).astype(dtype) + tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], + [25, 26, 27]]]).astype(dtype) tensor.set(tensor_array, place) n1 = tensor[1] t1 = tensor_array[1] - self.assertTrue((numpy.array(n1) == numpy.array(t1)).all()) + self.assertTrue((np.array(n1) == np.array(t1)).all()) n2 = tensor[1:] t2 = tensor_array[1:] - self.assertTrue((numpy.array(n2) == numpy.array(t2)).all()) + self.assertTrue((np.array(n2) == np.array(t2)).all()) n3 = tensor[0:2:] t3 = tensor_array[0:2:] - self.assertTrue((numpy.array(n3) == numpy.array(t3)).all()) + self.assertTrue((np.array(n3) == np.array(t3)).all()) n4 = tensor[2::-2] t4 = tensor_array[2::-2] - self.assertTrue((numpy.array(n4) == numpy.array(t4)).all()) + self.assertTrue((np.array(n4) == np.array(t4)).all()) n5 = tensor[2::-2][0] t5 = tensor_array[2::-2][0] - self.assertTrue((numpy.array(n5) == numpy.array(t5)).all()) + self.assertTrue((np.array(n5) == np.array(t5)).all()) n6 = tensor[2:-1:-1] t6 = tensor_array[2:-1:-1] - self.assertTrue((numpy.array(n6) == numpy.array(t6)).all()) + self.assertTrue((np.array(n6) == np.array(t6)).all()) n7 = tensor[0:, 0:] t7 = tensor_array[0:, 0:] - self.assertTrue((numpy.array(n7) == numpy.array(t7)).all()) + self.assertTrue((np.array(n7) == np.array(t7)).all()) n8 = tensor[0::1, 0::-1, 2:] t8 = tensor_array[0::1, 0::-1, 2:] - self.assertTrue((numpy.array(n8) == numpy.array(t8)).all()) + self.assertTrue((np.array(n8) == np.array(t8)).all()) def test_slice_tensor(self): for dtype in self.support_dtypes: @@ -264,7 +264,7 @@ class TestTensor(unittest.TestCase): tensor = var.get_tensor() tensor._set_dims([10, 10]) tensor._alloc_int(place) - tensor_array = numpy.array(tensor) + tensor_array = np.array(tensor) self.assertEqual((10, 10), tensor_array.shape) tensor_array[0, 0] = 1 tensor_array[2, 2] = 2 @@ -302,62 +302,62 @@ class TestTensor(unittest.TestCase): numbers.Integral)) def test_tensor_set_fp16(self): - array = numpy.random.random((300, 500)).astype("float16") + array = np.random.random((300, 500)).astype("float16") tensor = fluid.Tensor() place = core.CPUPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + np.testing.assert_array_equal(np.array(tensor), array) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + np.testing.assert_array_equal(np.array(tensor), array) place = core.CUDAPinnedPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + np.testing.assert_array_equal(np.array(tensor), array) def test_tensor_set_int16(self): - array = numpy.random.randint(100, size=(300, 500)).astype("int16") + array = np.random.randint(100, size=(300, 500)).astype("int16") tensor = fluid.Tensor() place = core.CPUPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + np.testing.assert_array_equal(np.array(tensor), array) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + 
np.testing.assert_array_equal(np.array(tensor), array) place = core.CUDAPinnedPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16) - self.assertTrue(numpy.array_equal(numpy.array(tensor), array)) + np.testing.assert_array_equal(np.array(tensor), array) def test_tensor_set_from_array_list(self): - array = numpy.random.randint(1000, size=(200, 300)) + array = np.random.randint(1000, size=(200, 300)) list_array = [array, array] tensor = fluid.Tensor() place = core.CPUPlace() tensor.set(list_array, place) self.assertEqual([2, 200, 300], tensor.shape()) - self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array)) + np.testing.assert_array_equal(np.array(tensor), list_array) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) tensor.set(list_array, place) self.assertEqual([2, 200, 300], tensor.shape()) - self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array)) + np.testing.assert_array_equal(np.array(tensor), list_array) place = core.CUDAPinnedPlace() tensor.set(list_array, place) self.assertEqual([2, 200, 300], tensor.shape()) - self.assertTrue(numpy.array_equal(numpy.array(tensor), list_array)) + np.testing.assert_array_equal(np.array(tensor), list_array) def test_tensor_set_error(self): scope = core.Scope() diff --git a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py index d9c4d2c61b2..6eef408f5e3 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.op import Operator @@ -28,7 +28,7 @@ class TestTensorArrayToTensorError(unittest.TestCase): def test_errors(self): with program_guard(Program()): - input_data = numpy.random.random((2, 4)).astype("float32") + input_data = np.random.random((2, 4)).astype("float32") def test_Variable(): fluid.layers.tensor_array_to_tensor(input=input_data) @@ -65,14 +65,14 @@ class TestLoDTensorArrayConcat(unittest.TestCase): for i in range(10): t = core.LoDTensor() if i == 0: - t.set(numpy.array([[i], [i]], dtype='float32'), cpu) + t.set(np.array([[i], [i]], dtype='float32'), cpu) else: - t.set(numpy.array([[i]], dtype='float32'), cpu) + t.set(np.array([[i]], dtype='float32'), cpu) input_tensor_array.append(t) self.assertEqual(10, len(input_tensor_array)) - random_grad = numpy.random.random_sample([11]).astype(numpy.float32) + random_grad = np.random.random_sample([11]).astype(np.float32) y_out = block.create_var(name="Out") y_out.persistable = True @@ -119,13 +119,13 @@ class TestLoDTensorArrayConcat(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) out = exe.run(program, fetch_list=fetch_list, scope=scope) - #print ("index: ", numpy.array(out[1])) + #print ("index: ", np.array(out[1])) # test forward - tensor_res = numpy.array(out[0]) - tensor_res_out_idx = numpy.array(out[1]) - tensor_gt = numpy.array([0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], - dtype='float32') + tensor_res = np.array(out[0]) + tensor_res_out_idx = np.array(out[1]) + tensor_gt = np.array([0] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + dtype='float32') self.assertEqual(len(tensor_res), len(tensor_gt)) self.assertEqual(len(tensor_res_out_idx), 10) @@ -148,14 +148,13 @@ class TestLoDTensorArrayConcat(unittest.TestCase): for i in range(len(grad_tensor_array)): if i == 
0: self.assertEqual( - numpy.array(grad_tensor_array[i])[0], - numpy.array(random_grad[i])) + np.array(grad_tensor_array[i])[0], np.array(random_grad[i])) self.assertEqual( - numpy.array(grad_tensor_array[i])[1], - numpy.array(random_grad[i + 1])) + np.array(grad_tensor_array[i])[1], + np.array(random_grad[i + 1])) if i == 1: - self.assertEqual(numpy.array(grad_tensor_array[i]), - numpy.array(random_grad[i + 1])) + self.assertEqual(np.array(grad_tensor_array[i]), + np.array(random_grad[i + 1])) class TestLoDTensorArrayStack(unittest.TestCase): @@ -165,16 +164,16 @@ class TestLoDTensorArrayStack(unittest.TestCase): self.op_type = "tensor_array_to_tensor" self.attrs = {"axis": 1, "use_stack": True} self.inputs = [ - numpy.random.rand(2, 3, 4).astype("float32"), - numpy.random.rand(2, 3, 4).astype("float32"), - numpy.random.rand(2, 3, 4).astype("float32") + np.random.rand(2, 3, 4).astype("float32"), + np.random.rand(2, 3, 4).astype("float32"), + np.random.rand(2, 3, 4).astype("float32") ] self.outputs = [ - numpy.stack(self.inputs, axis=self.attrs["axis"]), - numpy.array([x.shape[self.attrs["axis"]] for x in self.inputs], - dtype="int32") + np.stack(self.inputs, axis=self.attrs["axis"]), + np.array([x.shape[self.attrs["axis"]] for x in self.inputs], + dtype="int32") ] - self.input_grads = [numpy.ones_like(x) for x in self.inputs] + self.input_grads = [np.ones_like(x) for x in self.inputs] self.set_program() for var in self.program.list_vars(): # to avoid scope clearing after execution @@ -197,15 +196,13 @@ class TestLoDTensorArrayStack(unittest.TestCase): def run_check(self, executor, scope): executor.run(self.program, scope=scope) for i, output in enumerate(self.outputs): - numpy.allclose(numpy.array( + np.allclose(np.array( scope.var(self.output_vars[i].name).get_tensor()), - output, - atol=0) + output, + atol=0) tensor_array_grad = scope.var(self.array.name).get_lod_tensor_array() for i, input_grad in enumerate(self.input_grads): - numpy.allclose(numpy.array(tensor_array_grad[i]), - input_grad, - atol=0) + np.allclose(np.array(tensor_array_grad[i]), input_grad, atol=0) def test_cpu(self): scope = core.Scope() @@ -239,8 +236,8 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): return output_stack, output_index_stack, output_concat, output_index_concat def test_case(self): - inp0 = numpy.random.rand(2, 3, 4).astype("float32") - inp1 = numpy.random.rand(2, 3, 4).astype("float32") + inp0 = np.random.rand(2, 3, 4).astype("float32") + inp1 = np.random.rand(2, 3, 4).astype("float32") _outs_static = self._test_case(inp0, inp1) place = fluid.CPUPlace() @@ -251,7 +248,7 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): outs_dynamic = self._test_case(inp0, inp1) for s, d in zip(outs_static, outs_dynamic): - self.assertTrue(numpy.array_equal(s, d.numpy())) + np.testing.assert_array_equal(s, d.numpy()) def test_while_loop_case(self): with fluid.dygraph.guard(): @@ -259,7 +256,7 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1) ten = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) array = fluid.layers.create_array(dtype='float32') - inp0 = numpy.random.rand(2, 3, 4).astype("float32") + inp0 = np.random.rand(2, 3, 4).astype("float32") x0 = fluid.layers.assign(inp0) fluid.layers.array_write(x0, zero, array) @@ -275,9 +272,8 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): self.assertTrue(fluid.layers.array_length(array), 10) last = fluid.layers.fill_constant(shape=[1], dtype='int64', value=9) - 
self.assertTrue( - numpy.array_equal( - fluid.layers.array_read(array, last).numpy(), inp0)) + np.testing.assert_array_equal( + fluid.layers.array_read(array, last).numpy(), inp0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py b/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py index 64c4be260ed..6c38d234990 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_copy_from.py @@ -25,15 +25,15 @@ class TestTensorCopyFrom(unittest.TestCase): np_value = np.random.random(size=[10, 30]).astype('float32') t_src = Tensor() t_src.set(np_value, place) - self.assertTrue(np.array_equal(np_value, t_src)) + np.testing.assert_array_equal(np_value, t_src) t_dst1 = Tensor() t_dst1._copy_from(t_src, place) - self.assertTrue(np.array_equal(np_value, t_dst1)) + np.testing.assert_array_equal(np_value, t_dst1) t_dst2 = Tensor() t_dst2._copy_from(t_src, place, 5) - self.assertTrue(np.array_equal(np.array(np_value[0:5]), t_dst2)) + np.testing.assert_array_equal(np.array(np_value[0:5]), t_dst2) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py index 9767fb25243..07f5b036988 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py @@ -97,16 +97,16 @@ class TestTensorRegisterHook(unittest.TestCase): o.backward() # z.grad is not affected - self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy())) + np.testing.assert_array_equal(z.grad.numpy(), w.numpy()) # w.grad is not changed by hook - self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy())) + np.testing.assert_array_equal(w.grad.numpy(), z.numpy()) # x.grad and y.grad are changed if run hook - self.assertTrue( - np.array_equal(x.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy())) - self.assertTrue( - np.array_equal(y.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy())) + np.testing.assert_array_equal( + x.grad.numpy(), + z.numpy() * 2 if not removed else z.numpy()) + np.testing.assert_array_equal( + y.grad.numpy(), + z.numpy() * 2 if not removed else z.numpy()) def run_print_hook_for_interior_var(print_hook, removed=False): for device in self.devices: @@ -133,10 +133,10 @@ class TestTensorRegisterHook(unittest.TestCase): o.backward() # all grads are not affected - self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy())) - self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy())) - self.assertTrue(np.array_equal(x.grad.numpy(), z.numpy())) - self.assertTrue(np.array_equal(y.grad.numpy(), z.numpy())) + np.testing.assert_array_equal(z.grad.numpy(), w.numpy()) + np.testing.assert_array_equal(w.grad.numpy(), z.numpy()) + np.testing.assert_array_equal(x.grad.numpy(), z.numpy()) + np.testing.assert_array_equal(y.grad.numpy(), z.numpy()) def double_hook(grad): grad = grad * 2 @@ -195,13 +195,13 @@ class TestTensorRegisterHook(unittest.TestCase): o.backward() # z.grad, w.grad, x.grad is not affected - self.assertTrue(np.array_equal(z.grad.numpy(), w.numpy())) - self.assertTrue(np.array_equal(w.grad.numpy(), z.numpy())) - self.assertTrue(np.array_equal(x.grad.numpy(), z.numpy())) + np.testing.assert_array_equal(z.grad.numpy(), w.numpy()) + np.testing.assert_array_equal(w.grad.numpy(), z.numpy()) + np.testing.assert_array_equal(x.grad.numpy(), z.numpy()) # y.grad are changed if run hook - self.assertTrue( 
- np.array_equal(y.grad.numpy(), - z.numpy() * 2 if not removed else z.numpy())) + np.testing.assert_array_equal( + y.grad.numpy(), + z.numpy() * 2 if not removed else z.numpy()) # register hook run_double_hook_for_leaf_var(lambda grad: grad * 2) @@ -255,15 +255,13 @@ class TestTensorRegisterHook(unittest.TestCase): base_grad = np.array([5., 9., 13., 19.]) # x.grad is not changed - self.assertTrue(np.array_equal(x.grad.numpy(), base_grad)) + np.testing.assert_array_equal(x.grad.numpy(), base_grad) # b.grad is changed by x.hook - self.assertTrue( - np.array_equal(b.grad.numpy(), - base_grad * 2 if not removed else base_grad)) + np.testing.assert_array_equal( + b.grad.numpy(), base_grad * 2 if not removed else base_grad) # a.grad is changed by x.hook and a.hook - self.assertTrue( - np.array_equal(a.grad.numpy(), - base_grad * 4 if not removed else base_grad)) + np.testing.assert_array_equal( + a.grad.numpy(), base_grad * 4 if not removed else base_grad) # register hook run_double_hook_for_accumulated_grad_interior_var(lambda grad: grad * 2) @@ -310,9 +308,8 @@ class TestTensorRegisterHook(unittest.TestCase): base_grad = np.array([5., 9., 13., 19.]) # x.grad is changed by x.hook - self.assertTrue( - np.array_equal(x.grad.numpy(), - base_grad * 2 if not removed else base_grad)) + np.testing.assert_array_equal( + x.grad.numpy(), base_grad * 2 if not removed else base_grad) # register hook run_double_hook_for_accumulated_grad_leaf_var(lambda grad: grad * 2) @@ -364,14 +361,14 @@ class TestTensorRegisterHook(unittest.TestCase): data, label, lambda grad: grad * 2, True, True) # compare original value and with hook - self.assertTrue(np.array_equal(ret1_grad, ret1_grad_hook)) - self.assertTrue(np.array_equal(linear1_w_grad * 2, linear1_w_grad_hook)) - self.assertTrue(np.array_equal(linear1_b_grad * 2, linear1_b_grad_hook)) + np.testing.assert_array_equal(ret1_grad, ret1_grad_hook) + np.testing.assert_array_equal(linear1_w_grad * 2, linear1_w_grad_hook) + np.testing.assert_array_equal(linear1_b_grad * 2, linear1_b_grad_hook) # compare original value and remove hook - self.assertTrue(np.array_equal(ret1_grad, ret1_grad_rm)) - self.assertTrue(np.array_equal(linear1_w_grad, linear1_w_grad_rm)) - self.assertTrue(np.array_equal(linear1_b_grad, linear1_b_grad_rm)) + np.testing.assert_array_equal(ret1_grad, ret1_grad_rm) + np.testing.assert_array_equal(linear1_w_grad, linear1_w_grad_rm) + np.testing.assert_array_equal(linear1_b_grad, linear1_b_grad_rm) def test_func_hook_in_model(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -427,37 +424,37 @@ class TestTensorRegisterHook(unittest.TestCase): z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( device, hooks) - self.assertTrue(np.array_equal(w_grad, z)) - self.assertTrue(np.array_equal(x_grad, z * 8)) - self.assertTrue(np.array_equal(y_grad, z * 8)) + np.testing.assert_array_equal(w_grad, z) + np.testing.assert_array_equal(x_grad, z * 8) + np.testing.assert_array_equal(y_grad, z * 8) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( device, hooks, remove1=True) - self.assertTrue(np.array_equal(w_grad, z)) - self.assertTrue(np.array_equal(x_grad, z * 4)) - self.assertTrue(np.array_equal(y_grad, z * 4)) + np.testing.assert_array_equal(w_grad, z) + np.testing.assert_array_equal(x_grad, z * 4) + np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( device, hooks, remove2=True) - self.assertTrue(np.array_equal(w_grad, z)) - 
self.assertTrue(np.array_equal(x_grad, z * 4)) - self.assertTrue(np.array_equal(y_grad, z * 4)) + np.testing.assert_array_equal(w_grad, z) + np.testing.assert_array_equal(x_grad, z * 4) + np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( device, hooks, remove3=True) - self.assertTrue(np.array_equal(w_grad, z)) - self.assertTrue(np.array_equal(x_grad, z * 4)) - self.assertTrue(np.array_equal(y_grad, z * 4)) + np.testing.assert_array_equal(w_grad, z) + np.testing.assert_array_equal(x_grad, z * 4) + np.testing.assert_array_equal(y_grad, z * 4) z, w_grad, x_grad, y_grad = run_multiple_hooks_for_interior_var( device, hooks, remove1=True, remove2=True, remove3=True) - self.assertTrue(np.array_equal(w_grad, z)) - self.assertTrue(np.array_equal(x_grad, z)) - self.assertTrue(np.array_equal(y_grad, z)) + np.testing.assert_array_equal(w_grad, z) + np.testing.assert_array_equal(x_grad, z) + np.testing.assert_array_equal(y_grad, z) def test_multiple_hooks_for_interior_var(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -502,7 +499,7 @@ class TestTensorRegisterHook(unittest.TestCase): pass else: z.backward() - self.assertTrue(np.array_equal(x.grad.numpy(), np.array([8.]))) + np.testing.assert_array_equal(x.grad.numpy(), np.array([8.0])) def test_hook_in_double_grad(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py index ded9d42b9b5..64202ad377a 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py @@ -50,7 +50,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase): raise ValueError("Unsupported operation.") self.assertEqual(c_rlt.dtype, c.dtype) - self.assertTrue(np.array_equal(c_rlt.numpy(), c.numpy())) + np.testing.assert_array_equal(c_rlt.numpy(), c.numpy()) def func_tensor_add_scalar(self): # tensor(int64) + scalar(int) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py index 701ff5c3d6e..a030efd4488 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py @@ -58,7 +58,7 @@ class TestTensorScalarTypePromotionStatic(unittest.TestCase): rlt = exe.run(fetch_list=[c_rlt.name, c.name]) self.assertEqual(rlt[0].dtype, rlt[1].dtype) - self.assertTrue(np.array_equal(rlt[0], rlt[1])) + np.testing.assert_array_equal(rlt[0], rlt[1]) def test_tensor_add_scalar(self): # tensor(int64) + scalar(int) diff --git a/python/paddle/fluid/tests/unittests/test_translated_layer.py b/python/paddle/fluid/tests/unittests/test_translated_layer.py index ba44c78f2c7..f63e8d3eac0 100644 --- a/python/paddle/fluid/tests/unittests/test_translated_layer.py +++ b/python/paddle/fluid/tests/unittests/test_translated_layer.py @@ -132,7 +132,7 @@ class TestTranslatedLayer(unittest.TestCase): translated_layer.eval() pred = translated_layer(x) - self.assertTrue(np.array_equal(orig_pred.numpy(), pred.numpy())) + np.testing.assert_array_equal(orig_pred.numpy(), pred.numpy()) def load_and_fine_tuning(self): # load @@ -148,9 +148,11 @@ class TestTranslatedLayer(unittest.TestCase): parameters=translated_layer.parameters()) loss = 
train(translated_layer, self.loader, self.loss_fn, sgd) - self.assertTrue(np.array_equal(orig_loss.numpy(), loss.numpy()), - msg="original loss:\n{}\nnew loss:\n{}\n".format( - orig_loss.numpy(), loss.numpy())) + np.testing.assert_array_equal( + orig_loss.numpy(), + loss.numpy(), + err_msg='original loss:\n{}\nnew loss:\n{}\n'.format( + orig_loss.numpy(), loss.numpy())) def test_get_program(self): # load diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index fb48f631850..7f1794c39fc 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -458,13 +458,13 @@ class TestMoveAxis(unittest.TestCase): exe = paddle.static.Executor() out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] - self.assertEqual(np.array_equal(out_np, expected), True) + np.testing.assert_array_equal(out_np, expected) paddle.disable_static() x = paddle.to_tensor(x_np) out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0]) self.assertEqual(out.shape, [4, 2, 5, 7, 3]) - self.assertEqual(np.array_equal(out.numpy(), expected), True) + np.testing.assert_array_equal(out.numpy(), expected) paddle.enable_static() def test_moveaxis2(self): @@ -478,13 +478,13 @@ class TestMoveAxis(unittest.TestCase): exe = paddle.static.Executor() out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] - self.assertEqual(np.array_equal(out_np, expected), True) + np.testing.assert_array_equal(out_np, expected) paddle.disable_static() x = paddle.to_tensor(x_np) out = x.moveaxis(-2, -1) self.assertEqual(out.shape, [2, 5, 3]) - self.assertEqual(np.array_equal(out.numpy(), expected), True) + np.testing.assert_array_equal(out.numpy(), expected) paddle.enable_static() def test_moveaxis3(self): diff --git a/python/paddle/fluid/tests/unittests/test_unbind_op.py b/python/paddle/fluid/tests/unittests/test_unbind_op.py index 5f8fb382eb9..58b943a2668 100644 --- a/python/paddle/fluid/tests/unittests/test_unbind_op.py +++ b/python/paddle/fluid/tests/unittests/test_unbind_op.py @@ -1,230 +1,230 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import unittest -import numpy as np -from op_test import OpTest, convert_float_to_uint16 -import paddle -import paddle.fluid as fluid -import paddle.tensor as tensor -from paddle.fluid import compiler, Program, program_guard, core -from paddle.fluid.framework import _test_eager_guard - - -class TestUnbind(unittest.TestCase): - - def test_unbind(self): - - x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') - [out_0, out_1] = tensor.unbind(input=x_1, axis=0) - input_1 = np.random.random([2, 3]).astype("float32") - axis = fluid.data(shape=[1], dtype='int32', name='axis') - exe = fluid.Executor(place=fluid.CPUPlace()) - - [res_1, res_2] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_1, - "axis": 0 - }, - fetch_list=[out_0, out_1]) - - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) - - def test_unbind_dygraph(self): - with fluid.dygraph.guard(): - np_x = np.random.random([2, 3]).astype("float32") - x = paddle.to_tensor(np_x) - x.stop_gradient = False - [res_1, res_2] = paddle.unbind(x, 0) - self.assertTrue(np.array_equal(res_1, np_x[0, 0:100])) - self.assertTrue(np.array_equal(res_2, np_x[1, 0:100])) - - out = paddle.add_n([res_1, res_2]) - - np_grad = np.ones(x.shape, np.float32) - out.backward() - self.assertTrue(np.array_equal(x.grad.numpy(), np_grad)) - - def test_unbind_dygraph_final_state(self): - with _test_eager_guard(): - self.test_unbind_dygraph() - - -class TestLayersUnbind(unittest.TestCase): - - def test_layers_unbind(self): - - x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') - [out_0, out_1] = fluid.layers.unbind(input=x_1, axis=0) - input_1 = np.random.random([2, 3]).astype("float32") - axis = fluid.data(shape=[1], dtype='int32', name='axis') - exe = fluid.Executor(place=fluid.CPUPlace()) - - [res_1, res_2] = exe.run(fluid.default_main_program(), - feed={ - "x_1": input_1, - "axis": 0 - }, - fetch_list=[out_0, out_1]) - - assert np.array_equal(res_1, input_1[0, 0:100]) - assert np.array_equal(res_2, input_1[1, 0:100]) - - -class TestUnbindOp(OpTest): - - def initParameters(self): - pass - - def outReshape(self): - pass - - def setAxis(self): - pass - - def setUp(self): - self._set_op_type() - self.dtype = self.get_dtype() - self.axis = 0 - self.num = 3 - self.initParameters() - x = np.arange(12).reshape(3, 2, 2).astype(self.dtype) - self.out = np.split(x, self.num, self.axis) - self.outReshape() - self.inputs = {'X': x} - self.attrs = {'axis': self.axis} - self.setAxis() - self.outputs = {'Out': [('out%d' % i, self.out[i]) \ - for i in range(len(self.out))]} - - def get_dtype(self): - return "float64" - - def _set_op_type(self): - self.op_type = "unbind" - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1', 'out2']) - - -class TestUnbindOp1(TestUnbindOp): - - def initParameters(self): - self.axis = 1 - self.num = 2 - - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1']) - - def outReshape(self): - self.out[0] = self.out[0].reshape((3, 2)) - self.out[1] = self.out[1].reshape((3, 2)) - - -class TestUnbindOp2(TestUnbindOp): - - def initParameters(self): - self.axis = 2 - self.num = 2 - - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1']) - - def outReshape(self): - self.out[0] = self.out[0].reshape((3, 2)) - self.out[1] = self.out[1].reshape((3, 2)) - - -class TestUnbindOp3(TestUnbindOp): - - def initParameters(self): - self.axis = 2 - 
self.num = 2 - - def setAxis(self): - self.attrs = {'axis': -1} - - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1']) - - def outReshape(self): - self.out[0] = self.out[0].reshape((3, 2)) - self.out[1] = self.out[1].reshape((3, 2)) - - -class TestUnbindOp4(TestUnbindOp): - - def initParameters(self): - self.axis = 1 - self.num = 2 - - def setAxis(self): - self.attrs = {'axis': -2} - - def test_check_grad(self): - self.check_grad(['X'], ['out0', 'out1']) - - def outReshape(self): - self.out[0] = self.out[0].reshape((3, 2)) - self.out[1] = self.out[1].reshape((3, 2)) - - -class TestUnbindBF16Op(OpTest): - - def setUp(self): - self._set_op_type() - self.python_api = paddle.unbind - self.dtype = self.get_dtype() - self.axis = 0 - self.num = 3 - x = np.arange(12).reshape(3, 2, 2).astype(self.dtype) - self.out = np.split(x, self.num, self.axis) - self.inputs = {'X': convert_float_to_uint16(x)} - self.attrs = {'axis': self.axis} - self.outputs = {'Out': [('out%d' % i, convert_float_to_uint16(self.out[i])) \ - for i in range(len(self.out))]} - - def get_dtype(self): - return np.uint16 - - def _set_op_type(self): - self.op_type = "unbind" - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - pass - - -class TestUnbindAxisError(unittest.TestCase): - - def test_errors(self): - with program_guard(Program(), Program()): - x = fluid.data(shape=[2, 3], dtype='float32', name='x') - - def test_table_Variable(): - tensor.unbind(input=x, axis=2.0) - - self.assertRaises(TypeError, test_table_Variable) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest, convert_float_to_uint16 +import paddle +import paddle.fluid as fluid +import paddle.tensor as tensor +from paddle.fluid import compiler, Program, program_guard, core +from paddle.fluid.framework import _test_eager_guard + + +class TestUnbind(unittest.TestCase): + + def test_unbind(self): + + x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') + [out_0, out_1] = tensor.unbind(input=x_1, axis=0) + input_1 = np.random.random([2, 3]).astype("float32") + axis = fluid.data(shape=[1], dtype='int32', name='axis') + exe = fluid.Executor(place=fluid.CPUPlace()) + + [res_1, res_2] = exe.run(fluid.default_main_program(), + feed={ + "x_1": input_1, + "axis": 0 + }, + fetch_list=[out_0, out_1]) + + assert np.array_equal(res_1, input_1[0, 0:100]) + assert np.array_equal(res_2, input_1[1, 0:100]) + + def test_unbind_dygraph(self): + with fluid.dygraph.guard(): + np_x = np.random.random([2, 3]).astype("float32") + x = paddle.to_tensor(np_x) + x.stop_gradient = False + [res_1, res_2] = paddle.unbind(x, 0) + np.testing.assert_array_equal(res_1, np_x[0, 0:100]) + np.testing.assert_array_equal(res_2, np_x[1, 0:100]) + + out = paddle.add_n([res_1, res_2]) + + np_grad = np.ones(x.shape, np.float32) + out.backward() + np.testing.assert_array_equal(x.grad.numpy(), np_grad) + + def test_unbind_dygraph_final_state(self): + with _test_eager_guard(): + self.test_unbind_dygraph() + + +class TestLayersUnbind(unittest.TestCase): + + def test_layers_unbind(self): + + x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1') + [out_0, out_1] = fluid.layers.unbind(input=x_1, axis=0) + input_1 = np.random.random([2, 3]).astype("float32") + axis = fluid.data(shape=[1], dtype='int32', name='axis') + exe = fluid.Executor(place=fluid.CPUPlace()) + + [res_1, res_2] = exe.run(fluid.default_main_program(), + feed={ + "x_1": input_1, + "axis": 0 + }, + fetch_list=[out_0, out_1]) + + assert np.array_equal(res_1, input_1[0, 0:100]) + assert np.array_equal(res_2, input_1[1, 0:100]) + + +class TestUnbindOp(OpTest): + + def initParameters(self): + pass + + def outReshape(self): + pass + + def setAxis(self): + pass + + def setUp(self): + self._set_op_type() + self.dtype = self.get_dtype() + self.axis = 0 + self.num = 3 + self.initParameters() + x = np.arange(12).reshape(3, 2, 2).astype(self.dtype) + self.out = np.split(x, self.num, self.axis) + self.outReshape() + self.inputs = {'X': x} + self.attrs = {'axis': self.axis} + self.setAxis() + self.outputs = {'Out': [('out%d' % i, self.out[i]) \ + for i in range(len(self.out))]} + + def get_dtype(self): + return "float64" + + def _set_op_type(self): + self.op_type = "unbind" + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1', 'out2']) + + +class TestUnbindOp1(TestUnbindOp): + + def initParameters(self): + self.axis = 1 + self.num = 2 + + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1']) + + def outReshape(self): + self.out[0] = self.out[0].reshape((3, 2)) + self.out[1] = self.out[1].reshape((3, 2)) + + +class TestUnbindOp2(TestUnbindOp): + + def initParameters(self): + self.axis = 2 + self.num = 2 + + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1']) + + def outReshape(self): + self.out[0] = self.out[0].reshape((3, 2)) + self.out[1] = self.out[1].reshape((3, 2)) + + +class TestUnbindOp3(TestUnbindOp): + + def initParameters(self): + self.axis = 2 + self.num = 2 
+ + def setAxis(self): + self.attrs = {'axis': -1} + + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1']) + + def outReshape(self): + self.out[0] = self.out[0].reshape((3, 2)) + self.out[1] = self.out[1].reshape((3, 2)) + + +class TestUnbindOp4(TestUnbindOp): + + def initParameters(self): + self.axis = 1 + self.num = 2 + + def setAxis(self): + self.attrs = {'axis': -2} + + def test_check_grad(self): + self.check_grad(['X'], ['out0', 'out1']) + + def outReshape(self): + self.out[0] = self.out[0].reshape((3, 2)) + self.out[1] = self.out[1].reshape((3, 2)) + + +class TestUnbindBF16Op(OpTest): + + def setUp(self): + self._set_op_type() + self.python_api = paddle.unbind + self.dtype = self.get_dtype() + self.axis = 0 + self.num = 3 + x = np.arange(12).reshape(3, 2, 2).astype(self.dtype) + self.out = np.split(x, self.num, self.axis) + self.inputs = {'X': convert_float_to_uint16(x)} + self.attrs = {'axis': self.axis} + self.outputs = {'Out': [('out%d' % i, convert_float_to_uint16(self.out[i])) \ + for i in range(len(self.out))]} + + def get_dtype(self): + return np.uint16 + + def _set_op_type(self): + self.op_type = "unbind" + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + pass + + +class TestUnbindAxisError(unittest.TestCase): + + def test_errors(self): + with program_guard(Program(), Program()): + x = fluid.data(shape=[2, 3], dtype='float32', name='x') + + def test_table_Variable(): + tensor.unbind(input=x, axis=2.0) + + self.assertRaises(TypeError, test_table_Variable) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index fb250bc64b2..fa0234227d4 100755 --- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -187,7 +187,7 @@ class API_TestUnsqueeze3(unittest.TestCase): "data2": input2 }, fetch_list=[result_squeeze]) - self.assertTrue(np.array_equal(input1, result1)) + np.testing.assert_array_equal(input1, result1) self.assertEqual(input1.shape, result1.shape) @@ -200,7 +200,7 @@ class API_TestDyUnsqueeze(unittest.TestCase): input = paddle.to_tensor(input_1) output = paddle.unsqueeze(input, axis=[1]) out_np = output.numpy() - self.assertTrue(np.array_equal(input1, out_np)) + np.testing.assert_array_equal(input1, out_np) self.assertEqual(input1.shape, out_np.shape) @@ -213,7 +213,7 @@ class API_TestDyUnsqueeze2(unittest.TestCase): input = paddle.to_tensor(input1) output = paddle.unsqueeze(input, axis=1) out_np = output.numpy() - self.assertTrue(np.array_equal(out1, out_np)) + np.testing.assert_array_equal(out1, out_np) self.assertEqual(out1.shape, out_np.shape) @@ -227,7 +227,7 @@ class API_TestDyUnsqueezeAxisTensor(unittest.TestCase): input = paddle.to_tensor(input1) output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2])) out_np = output.numpy() - self.assertTrue(np.array_equal(out1, out_np)) + np.testing.assert_array_equal(out1, out_np) self.assertEqual(out1.shape, out_np.shape) @@ -245,7 +245,7 @@ class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase): axis=[paddle.to_tensor([1]), paddle.to_tensor([2])]) out_np = output.numpy() - self.assertTrue(np.array_equal(out1, out_np)) + np.testing.assert_array_equal(out1, out_np) self.assertEqual(out1.shape, out_np.shape) diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index c16238486df..e670781bee7 100644 --- 
a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -39,7 +39,7 @@ class TestVarBase(unittest.TestCase): paddle.set_default_dtype('float32') # set_default_dtype should not take effect on int x = paddle.to_tensor(1, place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1])) + np.testing.assert_array_equal(x.numpy(), [1]) self.assertNotEqual(x.dtype, core.VarDesc.VarType.FP32) y = paddle.to_tensor(2, place=x.place) @@ -49,8 +49,8 @@ class TestVarBase(unittest.TestCase): x = paddle.to_tensor(np.array([1.2]).astype('float16'), place=place, stop_gradient=False) - self.assertTrue( - np.array_equal(x.numpy(), np.array([1.2], 'float16'))) + np.testing.assert_array_equal(x.numpy(), + np.array([1.2], 'float16')) self.assertEqual(x.dtype, core.VarDesc.VarType.FP16) # set_default_dtype take effect on int @@ -59,20 +59,17 @@ class TestVarBase(unittest.TestCase): # set_default_dtype take effect on float x = paddle.to_tensor(1.2, place=place, stop_gradient=False) - self.assertTrue( - np.array_equal(x.numpy(), - np.array([1.2]).astype('float32'))) + np.testing.assert_array_equal(x.numpy(), + np.array([1.2]).astype('float32')) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) clone_x = x.clone() - self.assertTrue( - np.array_equal(clone_x.numpy(), - np.array([1.2]).astype('float32'))) + np.testing.assert_array_equal(clone_x.numpy(), + np.array([1.2]).astype('float32')) self.assertEqual(clone_x.dtype, core.VarDesc.VarType.FP32) y = clone_x**2 y.backward() - self.assertTrue( - np.array_equal(x.grad.numpy(), - np.array([2.4]).astype('float32'))) + np.testing.assert_array_equal(x.grad.numpy(), + np.array([2.4]).astype('float32')) y = x.cpu() self.assertEqual(y.place.__repr__(), "Place(cpu)") if core.is_compiled_with_cuda(): @@ -98,23 +95,23 @@ class TestVarBase(unittest.TestCase): # set_default_dtype take effect on complex x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1 + 2j])) + np.testing.assert_array_equal(x.numpy(), [1 + 2j]) self.assertEqual(x.dtype, core.VarDesc.VarType.COMPLEX64) paddle.set_default_dtype('float64') x = paddle.to_tensor(1.2, place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1.2])) + np.testing.assert_array_equal(x.numpy(), [1.2]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP64) x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1 + 2j])) + np.testing.assert_array_equal(x.numpy(), [1 + 2j]) self.assertEqual(x.dtype, core.VarDesc.VarType.COMPLEX128) x = paddle.to_tensor(1, dtype='float32', place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1.])) + np.testing.assert_array_equal(x.numpy(), [1.0]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) self.assertEqual(x.shape, [1]) self.assertEqual(x.stop_gradient, False) @@ -128,7 +125,7 @@ class TestVarBase(unittest.TestCase): dtype='float32', place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), [1., 2.])) + np.testing.assert_array_equal(x.numpy(), [1.0, 2.0]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) self.assertEqual(x.grad, None) self.assertEqual(x.shape, [2]) @@ -139,7 +136,7 @@ class TestVarBase(unittest.TestCase): dtype='float32', place=place, stop_gradient=False) - self.assertTrue(np.array_equal(x.numpy(), self.array)) + np.testing.assert_array_equal(x.numpy(), self.array) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) 
self.assertEqual(x.shape, self.shape) self.assertEqual(x.stop_gradient, False) @@ -147,19 +144,19 @@ class TestVarBase(unittest.TestCase): y = paddle.to_tensor(x) y = paddle.to_tensor(y, dtype='float64', place=place) - self.assertTrue(np.array_equal(y.numpy(), self.array)) + np.testing.assert_array_equal(y.numpy(), self.array) self.assertEqual(y.dtype, core.VarDesc.VarType.FP64) self.assertEqual(y.shape, self.shape) self.assertEqual(y.stop_gradient, True) self.assertEqual(y.type, core.VarDesc.VarType.LOD_TENSOR) z = x + y - self.assertTrue(np.array_equal(z.numpy(), 2 * self.array)) + np.testing.assert_array_equal(z.numpy(), 2 * self.array) x = paddle.to_tensor([1 + 2j, 1 - 2j], dtype='complex64', place=place) y = paddle.to_tensor(x) - self.assertTrue(np.array_equal(x.numpy(), [1 + 2j, 1 - 2j])) + np.testing.assert_array_equal(x.numpy(), [1 + 2j, 1 - 2j]) self.assertEqual(y.dtype, core.VarDesc.VarType.COMPLEX64) self.assertEqual(y.shape, [2]) @@ -168,7 +165,7 @@ class TestVarBase(unittest.TestCase): x_array = np.array(x) self.assertEqual(x_array.shape, x.numpy().shape) self.assertEqual(x_array.dtype, x.numpy().dtype) - self.assertTrue(np.array_equal(x_array, x.numpy())) + np.testing.assert_array_equal(x_array, x.numpy()) x = paddle.to_tensor(1.0) self.assertEqual(x.item(), 1.0) @@ -178,9 +175,8 @@ class TestVarBase(unittest.TestCase): self.assertTrue(isinstance(x.item(5), float)) self.assertTrue(isinstance(x.item(1, 0, 1), float)) self.assertEqual(x.item(5), x.item(1, 0, 1)) - self.assertTrue( - np.array_equal(x.item(1, 0, 1), - x.numpy().item(1, 0, 1))) + np.testing.assert_array_equal(x.item(1, 0, 1), + x.numpy().item(1, 0, 1)) x = paddle.to_tensor([[1.111111, 2.222222, 3.333333]]) self.assertEqual(x.item(0, 2), x.item(2)) @@ -228,7 +224,7 @@ class TestVarBase(unittest.TestCase): self.assertEqual(x.shape, [0]) expected_result = np.array([], dtype='float32') self.assertEqual(x.numpy().shape, expected_result.shape) - self.assertTrue(np.array_equal(x.numpy(), expected_result)) + np.testing.assert_array_equal(x.numpy(), expected_result) numpy_array = np.random.randn(3, 4) # covert core.LoDTensor to paddle.Tensor @@ -236,7 +232,7 @@ class TestVarBase(unittest.TestCase): place = paddle.fluid.framework._current_expected_place() lod_tensor.set(numpy_array, place) x = paddle.to_tensor(lod_tensor) - self.assertTrue(np.array_equal(x.numpy(), numpy_array)) + np.testing.assert_array_equal(x.numpy(), numpy_array) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) self.assertEqual(str(x.place), str(place)) @@ -245,7 +241,7 @@ class TestVarBase(unittest.TestCase): dlpack = x.value().get_tensor()._to_dlpack() tensor_from_dlpack = paddle.fluid.core.from_dlpack(dlpack) x = paddle.to_tensor(tensor_from_dlpack) - self.assertTrue(np.array_equal(x.numpy(), numpy_array)) + np.testing.assert_array_equal(x.numpy(), numpy_array) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) with self.assertRaises(ValueError): @@ -326,13 +322,13 @@ class TestVarBase(unittest.TestCase): lod_tensor = core.LoDTensor() lod_tensor.set(a_np, core.CPUPlace()) a = paddle.to_tensor(lod_tensor) - self.assertTrue(np.array_equal(a_np, a.numpy())) + np.testing.assert_array_equal(a_np, a.numpy()) with paddle.fluid.dygraph.guard(core.CUDAPlace(0)): lod_tensor = core.LoDTensor() lod_tensor.set(a_np, core.CUDAPlace(0)) a = paddle.to_tensor(lod_tensor, place=core.CPUPlace()) - self.assertTrue(np.array_equal(a_np, a.numpy())) + np.testing.assert_array_equal(a_np, a.numpy()) self.assertTrue(a.place.__repr__(), "Place(cpu)") def 
test_to_tensor_with_lodtensor(self): @@ -343,7 +339,7 @@ class TestVarBase(unittest.TestCase): def func_test_to_variable(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array, name="abc") - self.assertTrue(np.array_equal(var.numpy(), self.array)) + np.testing.assert_array_equal(var.numpy(), self.array) self.assertEqual(var.name, 'abc') # default value self.assertEqual(var.persistable, False) @@ -368,7 +364,7 @@ class TestVarBase(unittest.TestCase): with fluid.dygraph.guard(): array = [[[1, 2], [1, 2], [1.0, 2]], [[1, 2], [1, 2], [1, 2]]] var = fluid.dygraph.to_variable(array, dtype='int32') - self.assertTrue(np.array_equal(var.numpy(), array)) + np.testing.assert_array_equal(var.numpy(), array) self.assertEqual(var.shape, [2, 3, 2]) self.assertEqual(var.dtype, core.VarDesc.VarType.INT32) self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR) @@ -382,7 +378,7 @@ class TestVarBase(unittest.TestCase): with fluid.dygraph.guard(): array = (((1, 2), (1, 2), (1, 2)), ((1, 2), (1, 2), (1, 2))) var = fluid.dygraph.to_variable(array, dtype='float32') - self.assertTrue(np.array_equal(var.numpy(), array)) + np.testing.assert_array_equal(var.numpy(), array) self.assertEqual(var.shape, [2, 3, 2]) self.assertEqual(var.dtype, core.VarDesc.VarType.FP32) self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR) @@ -397,7 +393,7 @@ class TestVarBase(unittest.TestCase): t = fluid.Tensor() t.set(np.random.random((1024, 1024)), fluid.CPUPlace()) var = fluid.dygraph.to_variable(t) - self.assertTrue(np.array_equal(t, var.numpy())) + np.testing.assert_array_equal(t, var.numpy()) def test_tensor_to_variable(self): with _test_eager_guard(): @@ -516,11 +512,11 @@ class TestVarBase(unittest.TestCase): self.assertEqual(x_copy.persistable, y_copy.persistable) self.assertEqual(x_copy.type, y_copy.type) self.assertEqual(x_copy.dtype, y_copy.dtype) - self.assertTrue(np.array_equal(x.numpy(), x_copy.numpy())) - self.assertTrue(np.array_equal(y.numpy(), y_copy.numpy())) + np.testing.assert_array_equal(x.numpy(), x_copy.numpy()) + np.testing.assert_array_equal(y.numpy(), y_copy.numpy()) self.assertNotEqual(id(x), id(x_copy)) - self.assertTrue(np.array_equal(x.numpy(), [2.])) + np.testing.assert_array_equal(x.numpy(), [2.0]) with self.assertRaises(ValueError): x_copy[:] = 5. 
@@ -559,9 +555,9 @@ class TestVarBase(unittest.TestCase): self.assertEqual(copy_selected_rows.height(), selected_rows.height()) self.assertEqual(copy_selected_rows.rows(), selected_rows.rows()) - self.assertTrue( - np.array_equal(np.array(copy_selected_rows.get_tensor()), - np.array(selected_rows.get_tensor()))) + np.testing.assert_array_equal( + np.array(copy_selected_rows.get_tensor()), + np.array(selected_rows.get_tensor())) def test_deep_copy(self): with _test_eager_guard(): @@ -577,7 +573,7 @@ class TestVarBase(unittest.TestCase): tmp2 = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) var.set_value(tmp2) - self.assertTrue(np.array_equal(var.numpy(), tmp2)) + np.testing.assert_array_equal(var.numpy(), tmp2) def test_set_value(self): with _test_eager_guard(): @@ -729,33 +725,32 @@ class TestVarBase(unittest.TestCase): ] local_out = [var.numpy() for var in vars] - self.assertTrue(np.array_equal(local_out[1], tensor_array[0, 1, 1:2])) - self.assertTrue(np.array_equal(local_out[2], tensor_array[1:])) - self.assertTrue(np.array_equal(local_out[3], tensor_array[0:1])) - self.assertTrue(np.array_equal(local_out[4], tensor_array[::-1])) - self.assertTrue(np.array_equal(local_out[5], tensor_array[1, 1:, 1:])) - self.assertTrue( - np.array_equal(local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1])) - self.assertTrue(np.array_equal(local_out[7], tensor_array[:, :, :-1])) - self.assertTrue(np.array_equal(local_out[8], tensor_array[:1, :1, :1])) - self.assertTrue( - np.array_equal(local_out[9], tensor_array[:-1, :-1, :-1])) - self.assertTrue( - np.array_equal(local_out[10], tensor_array[::-1, :1, :-1])) - self.assertTrue( - np.array_equal(local_out[11], tensor_array[:-1, ::-1, -1:])) - self.assertTrue( - np.array_equal(local_out[12], tensor_array[1:2, 2:, ::-1])) - self.assertTrue( - np.array_equal(local_out[13], tensor_array[2:10, 2:, -2:-1])) - self.assertTrue( - np.array_equal(local_out[14], tensor_array[1:-1, 0:2, ::-1])) - self.assertTrue( - np.array_equal(local_out[15], tensor_array[::-1, ::-1, ::-1])) - self.assertTrue(np.array_equal(local_out[16], tensor_array[-4:4])) - self.assertTrue(np.array_equal(local_out[17], tensor_array[:, 0, 0:0])) - self.assertTrue(np.array_equal(local_out[18], tensor_array[:, 1:1:2])) + np.testing.assert_array_equal(local_out[1], tensor_array[0, 1, 1:2]) + np.testing.assert_array_equal(local_out[2], tensor_array[1:]) + np.testing.assert_array_equal(local_out[3], tensor_array[0:1]) + np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) + np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) + np.testing.assert_array_equal( + local_out[6], + tensor_array.reshape((3, -1, 3))[:, :, -1]) + np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) + np.testing.assert_array_equal(local_out[8], tensor_array[:1, :1, :1]) + np.testing.assert_array_equal(local_out[9], tensor_array[:-1, :-1, :-1]) + np.testing.assert_array_equal(local_out[10], + tensor_array[::-1, :1, :-1]) + np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, + -1:]) + np.testing.assert_array_equal(local_out[12], tensor_array[1:2, + 2:, ::-1]) + np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, + -2:-1]) + np.testing.assert_array_equal(local_out[14], tensor_array[1:-1, + 0:2, ::-1]) + np.testing.assert_array_equal(local_out[15], + tensor_array[::-1, ::-1, ::-1]) + np.testing.assert_array_equal(local_out[16], tensor_array[-4:4]) + np.testing.assert_array_equal(local_out[17], tensor_array[:, 0, 0:0]) + 
np.testing.assert_array_equal(local_out[18], tensor_array[:, 1:1:2]) def _test_slice_for_tensor_attr(self): tensor_array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], @@ -795,31 +790,30 @@ class TestVarBase(unittest.TestCase): ] local_out = [var.numpy() for var in vars] - self.assertTrue(np.array_equal(local_out[1], tensor_array[0, 1, 1:2])) - self.assertTrue(np.array_equal(local_out[2], tensor_array[1:])) - self.assertTrue(np.array_equal(local_out[3], tensor_array[0:1])) - self.assertTrue(np.array_equal(local_out[4], tensor_array[::-1])) - self.assertTrue(np.array_equal(local_out[5], tensor_array[1, 1:, 1:])) - self.assertTrue( - np.array_equal(local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1])) - self.assertTrue(np.array_equal(local_out[7], tensor_array[:, :, :-1])) - self.assertTrue(np.array_equal(local_out[8], tensor_array[:1, :1, :1])) - self.assertTrue( - np.array_equal(local_out[9], tensor_array[:-1, :-1, :-1])) - self.assertTrue( - np.array_equal(local_out[10], tensor_array[::-1, :1, :-1])) - self.assertTrue( - np.array_equal(local_out[11], tensor_array[:-1, ::-1, -1:])) - self.assertTrue( - np.array_equal(local_out[12], tensor_array[1:2, 2:, ::-1])) - self.assertTrue( - np.array_equal(local_out[13], tensor_array[2:10, 2:, -2:-1])) - self.assertTrue( - np.array_equal(local_out[14], tensor_array[1:-1, 0:2, ::-1])) - self.assertTrue( - np.array_equal(local_out[15], tensor_array[::-1, ::-1, ::-1])) - self.assertTrue(np.array_equal(local_out[16], tensor_array[-4:4])) + np.testing.assert_array_equal(local_out[1], tensor_array[0, 1, 1:2]) + np.testing.assert_array_equal(local_out[2], tensor_array[1:]) + np.testing.assert_array_equal(local_out[3], tensor_array[0:1]) + np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) + np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) + np.testing.assert_array_equal( + local_out[6], + tensor_array.reshape((3, -1, 3))[:, :, -1]) + np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) + np.testing.assert_array_equal(local_out[8], tensor_array[:1, :1, :1]) + np.testing.assert_array_equal(local_out[9], tensor_array[:-1, :-1, :-1]) + np.testing.assert_array_equal(local_out[10], + tensor_array[::-1, :1, :-1]) + np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, + -1:]) + np.testing.assert_array_equal(local_out[12], tensor_array[1:2, + 2:, ::-1]) + np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, + -2:-1]) + np.testing.assert_array_equal(local_out[14], tensor_array[1:-1, + 0:2, ::-1]) + np.testing.assert_array_equal(local_out[15], + tensor_array[::-1, ::-1, ::-1]) + np.testing.assert_array_equal(local_out[16], tensor_array[-4:4]) def _test_for_getitem_ellipsis_index(self): shape = (64, 3, 5, 256) @@ -842,15 +836,15 @@ class TestVarBase(unittest.TestCase): var_tensor[:, ..., 100].numpy(), ] - self.assertTrue(np.array_equal(var[0], var_np[..., 0])) - self.assertTrue(np.array_equal(var[1], var_np[..., 1, 0])) - self.assertTrue(np.array_equal(var[2], var_np[0, ..., 1, 0])) - self.assertTrue(np.array_equal(var[3], var_np[1, ..., 1])) - self.assertTrue(np.array_equal(var[4], var_np[2, ...])) - self.assertTrue(np.array_equal(var[5], var_np[2, 0, ...])) - self.assertTrue(np.array_equal(var[6], var_np[2, 0, 1, ...])) - self.assertTrue(np.array_equal(var[7], var_np[...])) - self.assertTrue(np.array_equal(var[8], var_np[:, ..., 100])) + np.testing.assert_array_equal(var[0], var_np[..., 0]) + np.testing.assert_array_equal(var[1], var_np[..., 1, 0]) + 
np.testing.assert_array_equal(var[2], var_np[0, ..., 1, 0]) + np.testing.assert_array_equal(var[3], var_np[1, ..., 1]) + np.testing.assert_array_equal(var[4], var_np[2, ...]) + np.testing.assert_array_equal(var[5], var_np[2, 0, ...]) + np.testing.assert_array_equal(var[6], var_np[2, 0, 1, ...]) + np.testing.assert_array_equal(var[7], var_np[...]) + np.testing.assert_array_equal(var[8], var_np[:, ..., 100]) var_fp32 = paddle.to_tensor(np_fp32_value) var_int = paddle.to_tensor(np_int_value) @@ -860,8 +854,8 @@ class TestVarBase(unittest.TestCase): # test 1 dim tensor var_one_dim = paddle.to_tensor([1, 2, 3, 4]) - self.assertTrue( - np.array_equal(var_one_dim[..., 0].numpy(), np.array([1]))) + np.testing.assert_array_equal(var_one_dim[..., 0].numpy(), + np.array([1])) def _test_none_index(self): shape = (8, 64, 5, 256) @@ -883,19 +877,18 @@ class TestVarBase(unittest.TestCase): var_tensor[0, 1:10:2, None, None, ...].numpy(), ] - self.assertTrue(np.array_equal(var[0], np_value[1, 0, None])) - self.assertTrue(np.array_equal(var[1], np_value[None, ..., 1, 0])) - self.assertTrue(np.array_equal(var[2], np_value[:, :, :, None])) - self.assertTrue(np.array_equal(var[3], np_value[1, ..., 1, None])) - self.assertTrue(np.array_equal(var[4], np_value[2, ..., None, None])) - self.assertTrue(np.array_equal(var[5], np_value[None, 2, 0, ...])) - self.assertTrue(np.array_equal(var[6], np_value[None, 2, None, 1])) - self.assertTrue(np.array_equal(var[7], np_value[None])) - self.assertTrue(np.array_equal(var[8], np_value[0, 0, None, 0, 0, - None])) - self.assertTrue( - np.array_equal(var[9], np_value[None, None, 0, ..., None])) - self.assertTrue(np.array_equal(var[10], np_value[..., None, :, None])) + np.testing.assert_array_equal(var[0], np_value[1, 0, None]) + np.testing.assert_array_equal(var[1], np_value[None, ..., 1, 0]) + np.testing.assert_array_equal(var[2], np_value[:, :, :, None]) + np.testing.assert_array_equal(var[3], np_value[1, ..., 1, None]) + np.testing.assert_array_equal(var[4], np_value[2, ..., None, None]) + np.testing.assert_array_equal(var[5], np_value[None, 2, 0, ...]) + np.testing.assert_array_equal(var[6], np_value[None, 2, None, 1]) + np.testing.assert_array_equal(var[7], np_value[None]) + np.testing.assert_array_equal(var[8], np_value[0, 0, None, 0, 0, None]) + np.testing.assert_array_equal(var[9], np_value[None, None, 0, ..., + None]) + np.testing.assert_array_equal(var[10], np_value[..., None, :, None]) # TODO(zyfncg) there is a bug of dimensions when slice step > 1 and # indexs has int type @@ -919,19 +912,17 @@ class TestVarBase(unittest.TestCase): var_tensor[tensor_index].numpy(), var_tensor[paddle.to_tensor(index[4])].numpy() ] - self.assertTrue(np.array_equal(var[0], np_value[index[0]])) - self.assertTrue(np.array_equal(var[1], np_value[index[1]])) - self.assertTrue(np.array_equal(var[2], np_value[index[2]])) - self.assertTrue(np.array_equal(var[3], np_value[index[3]])) - self.assertTrue(np.array_equal(var[4], np_value[index[0]])) - self.assertTrue(np.array_equal(var[5], np_value[index2d])) - self.assertTrue(np.array_equal(var[6], np_value[index[4]])) - self.assertTrue( - np.array_equal(var_tensor[var_tensor > 0.67], - np_value[np_value > 0.67])) - self.assertTrue( - np.array_equal(var_tensor[var_tensor < 0.55], - np_value[np_value < 0.55])) + np.testing.assert_array_equal(var[0], np_value[index[0]]) + np.testing.assert_array_equal(var[1], np_value[index[1]]) + np.testing.assert_array_equal(var[2], np_value[index[2]]) + np.testing.assert_array_equal(var[3], np_value[index[3]]) + 
np.testing.assert_array_equal(var[4], np_value[index[0]]) + np.testing.assert_array_equal(var[5], np_value[index2d]) + np.testing.assert_array_equal(var[6], np_value[index[4]]) + np.testing.assert_array_equal(var_tensor[var_tensor > 0.67], + np_value[np_value > 0.67]) + np.testing.assert_array_equal(var_tensor[var_tensor < 0.55], + np_value[np_value < 0.55]) with self.assertRaises(ValueError): var_tensor[[False, False, False, False]] @@ -951,30 +942,27 @@ class TestVarBase(unittest.TestCase): var = [ var_tensor[tensor_index].numpy(), ] - self.assertTrue(np.array_equal(var[0], np_value[index])) + np.testing.assert_array_equal(var[0], np_value[index]) def _test_for_var(self): np_value = np.random.random((30, 100, 100)).astype('float32') w = fluid.dygraph.to_variable(np_value) for i, e in enumerate(w): - self.assertTrue(np.array_equal(e.numpy(), np_value[i])) + np.testing.assert_array_equal(e.numpy(), np_value[i]) def _test_numpy_index(self): array = np.arange(120).reshape([4, 5, 6]) t = paddle.to_tensor(array) - self.assertTrue(np.array_equal(t[np.longlong(0)].numpy(), array[0])) - self.assertTrue( - np.array_equal( - t[np.longlong(0):np.longlong(4):np.longlong(2)].numpy(), - array[0:4:2])) - self.assertTrue(np.array_equal(t[np.int64(0)].numpy(), array[0])) - self.assertTrue( - np.array_equal(t[np.int32(1):np.int32(4):np.int32(2)].numpy(), - array[1:4:2])) - self.assertTrue( - np.array_equal(t[np.int16(0):np.int16(4):np.int16(2)].numpy(), - array[0:4:2])) + np.testing.assert_array_equal(t[np.longlong(0)].numpy(), array[0]) + np.testing.assert_array_equal( + t[np.longlong(0):np.longlong(4):np.longlong(2)].numpy(), + array[0:4:2]) + np.testing.assert_array_equal(t[np.int64(0)].numpy(), array[0]) + np.testing.assert_array_equal( + t[np.int32(1):np.int32(4):np.int32(2)].numpy(), array[1:4:2]) + np.testing.assert_array_equal( + t[np.int16(0):np.int16(4):np.int16(2)].numpy(), array[0:4:2]) def _test_list_index(self): # case1: @@ -982,8 +970,8 @@ class TestVarBase(unittest.TestCase): x = paddle.to_tensor(array) py_idx = [[0, 2, 0, 1, 3], [0, 0, 1, 2, 0]] idx = [paddle.to_tensor(py_idx[0]), paddle.to_tensor(py_idx[1])] - self.assertTrue(np.array_equal(x[idx].numpy(), array[py_idx])) - self.assertTrue(np.array_equal(x[py_idx].numpy(), array[py_idx])) + np.testing.assert_array_equal(x[idx].numpy(), array[py_idx]) + np.testing.assert_array_equal(x[py_idx].numpy(), array[py_idx]) # case2: tensor_x = paddle.to_tensor( np.zeros(12).reshape(2, 6).astype(np.float32)) @@ -993,12 +981,12 @@ class TestVarBase(unittest.TestCase): res = tensor_x.numpy() exp = np.array([[0., 0., 42., 42., 42., 0.], [0., 0., 42., 42., 42., 0.]]) - self.assertTrue(np.array_equal(res, exp)) + np.testing.assert_array_equal(res, exp) # case3: row = np.array([0, 1, 2]) col = np.array([2, 1, 3]) - self.assertTrue(np.array_equal(array[row, col], x[row, col].numpy())) + np.testing.assert_array_equal(array[row, col], x[row, col].numpy()) def func_test_slice(self): with fluid.dygraph.guard(): @@ -1013,8 +1001,8 @@ class TestVarBase(unittest.TestCase): self._test_list_index() var = fluid.dygraph.to_variable(self.array) - self.assertTrue(np.array_equal(var[1, :].numpy(), self.array[1, :])) - self.assertTrue(np.array_equal(var[::-1].numpy(), self.array[::-1])) + np.testing.assert_array_equal(var[1, :].numpy(), self.array[1, :]) + np.testing.assert_array_equal(var[::-1].numpy(), self.array[::-1]) with self.assertRaises(IndexError): y = var[self.shape[0]] @@ -1034,9 +1022,8 @@ class TestVarBase(unittest.TestCase): def 
func_test_var_base_to_np(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) - self.assertTrue( - np.array_equal(var.numpy(), - fluid.framework._var_base_to_np(var))) + np.testing.assert_array_equal(var.numpy(), + fluid.framework._var_base_to_np(var)) def test_var_base_to_np(self): with _test_eager_guard(): @@ -1046,9 +1033,9 @@ class TestVarBase(unittest.TestCase): def func_test_var_base_as_np(self): with fluid.dygraph.guard(): var = fluid.dygraph.to_variable(self.array) - self.assertTrue(np.array_equal(var.numpy(), np.array(var))) - self.assertTrue( - np.array_equal(var.numpy(), np.array(var, dtype=np.float32))) + np.testing.assert_array_equal(var.numpy(), np.array(var)) + np.testing.assert_array_equal(var.numpy(), + np.array(var, dtype=np.float32)) def test_var_base_as_np(self): with _test_eager_guard(): @@ -1342,19 +1329,19 @@ class TestVarBaseSetitem(unittest.TestCase): else: result = self.np_value - self.assertTrue(np.array_equal(self.tensor_x[0].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[0].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) self.tensor_x[1:2] = value if _in_legacy_dygraph(): self.assertEqual(self.tensor_x.inplace_version, 2) - self.assertTrue(np.array_equal(self.tensor_x[1].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[1].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) self.tensor_x[...] = value if _in_legacy_dygraph(): self.assertEqual(self.tensor_x.inplace_version, 3) - self.assertTrue(np.array_equal(self.tensor_x[3].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[3].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) def func_test_value_tensor(self): @@ -1447,19 +1434,19 @@ class TestVarBaseSetitemBoolIndex(unittest.TestCase): else: result = self.np_value - self.assertTrue(np.array_equal(self.tensor_x[0].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[0].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) index_2 = paddle.to_tensor(np.array([False, True, False, False])) self.tensor_x[index_2] = value self.assertEqual(self.tensor_x.inplace_version, 2) - self.assertTrue(np.array_equal(self.tensor_x[1].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[1].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) index_3 = paddle.to_tensor(np.array([True, True, True, True])) self.tensor_x[index_3] = value self.assertEqual(self.tensor_x.inplace_version, 3) - self.assertTrue(np.array_equal(self.tensor_x[3].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[3].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) def func_test_value_tensor(self): @@ -1518,7 +1505,7 @@ class TestVarBaseSetitemBoolScalarIndex(unittest.TestCase): else: result = self.np_value - self.assertTrue(np.array_equal(self.tensor_x[0].numpy(), result)) + np.testing.assert_array_equal(self.tensor_x[0].numpy(), result) self.assertEqual(id_origin, id(self.tensor_x)) diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py index 5fb220da609..7fc6ec93593 100644 --- a/python/paddle/fluid/tests/unittests/test_variable.py +++ b/python/paddle/fluid/tests/unittests/test_variable.py @@ -178,34 +178,31 @@ class TestVariable(unittest.TestCase): var13, var14, var15 ]) - self.assertTrue( - np.array_equal(local_out[1], tensor_array[0, 1, 1:2])) - self.assertTrue(np.array_equal(local_out[2], tensor_array[1:])) - self.assertTrue(np.array_equal(local_out[3], 
tensor_array[0:1])) - self.assertTrue(np.array_equal(local_out[4], tensor_array[::-1])) - self.assertTrue( - np.array_equal(local_out[5], tensor_array[1, 1:, 1:])) - self.assertTrue( - np.array_equal(local_out[6], - tensor_array.reshape((3, -1, 3))[:, :, -1])) - self.assertTrue( - np.array_equal(local_out[7], tensor_array[:, :, :-1])) - self.assertTrue( - np.array_equal(local_out[8], tensor_array[:1, :1, :1])) - self.assertTrue( - np.array_equal(local_out[9], tensor_array[:-1, :-1, :-1])) - self.assertTrue( - np.array_equal(local_out[10], tensor_array[::-1, :1, :-1])) - self.assertTrue( - np.array_equal(local_out[11], tensor_array[:-1, ::-1, -1:])) - self.assertTrue( - np.array_equal(local_out[12], tensor_array[1:2, 2:, ::-1])) - self.assertTrue( - np.array_equal(local_out[13], tensor_array[2:10, 2:, -2:-1])) - self.assertTrue( - np.array_equal(local_out[14], tensor_array[1:-1, 0:2, ::-1])) - self.assertTrue( - np.array_equal(local_out[15], tensor_array[::-1, ::-1, ::-1])) + np.testing.assert_array_equal(local_out[1], tensor_array[0, 1, 1:2]) + np.testing.assert_array_equal(local_out[2], tensor_array[1:]) + np.testing.assert_array_equal(local_out[3], tensor_array[0:1]) + np.testing.assert_array_equal(local_out[4], tensor_array[::-1]) + np.testing.assert_array_equal(local_out[5], tensor_array[1, 1:, 1:]) + np.testing.assert_array_equal( + local_out[6], + tensor_array.reshape((3, -1, 3))[:, :, -1]) + np.testing.assert_array_equal(local_out[7], tensor_array[:, :, :-1]) + np.testing.assert_array_equal(local_out[8], + tensor_array[:1, :1, :1]) + np.testing.assert_array_equal(local_out[9], + tensor_array[:-1, :-1, :-1]) + np.testing.assert_array_equal(local_out[10], + tensor_array[::-1, :1, :-1]) + np.testing.assert_array_equal(local_out[11], tensor_array[:-1, ::-1, + -1:]) + np.testing.assert_array_equal(local_out[12], tensor_array[1:2, + 2:, ::-1]) + np.testing.assert_array_equal(local_out[13], tensor_array[2:10, 2:, + -2:-1]) + np.testing.assert_array_equal(local_out[14], + tensor_array[1:-1, 0:2, ::-1]) + np.testing.assert_array_equal(local_out[15], + tensor_array[::-1, ::-1, ::-1]) def _test_slice_index_tensor(self, place): data = np.random.rand(2, 3).astype("float32") @@ -593,7 +590,7 @@ class TestListIndex(unittest.TestCase): getitem_pp = exe.run(prog, feed={x.name: array}, fetch_list=fetch_list) - self.assertTrue(np.array_equal(getitem_np, getitem_pp[0])) + np.testing.assert_array_equal(getitem_np, getitem_pp[0]) array = array[0] index = index[0] @@ -620,7 +617,7 @@ class TestListIndex(unittest.TestCase): index = index[0] continue getitem_pp = pt[index_mod] - self.assertTrue(np.array_equal(getitem_np, getitem_pp.numpy())) + np.testing.assert_array_equal(getitem_np, getitem_pp.numpy()) array = array[0] index = index[0] @@ -680,9 +677,10 @@ class TestListIndex(unittest.TestCase): }, fetch_list=fetch_list) - self.assertTrue(np.array_equal(y2, getitem_pp[0]), - msg='\n numpy:{},\n paddle:{}'.format( - y2, getitem_pp[0])) + np.testing.assert_array_equal( + y2, + getitem_pp[0], + err_msg='\n numpy:{},\n paddle:{}'.format(y2, getitem_pp[0])) def test_dygraph_list_index_muti_dim(self): paddle.disable_static() @@ -707,7 +705,7 @@ class TestListIndex(unittest.TestCase): y_np = array[index_t1, index_t2] y = x[index_t1, index_t2] - self.assertTrue(np.array_equal(y.numpy(), y_np)) + np.testing.assert_array_equal(y.numpy(), y_np) def run_getitem_list_index(self, array, index): x = paddle.static.data(name='x', shape=array.shape, dtype='float32') @@ -966,12 +964,16 @@ class 
TestListIndex(unittest.TestCase): index_2.name: index_mod2 }, fetch_list=fetch_list) - self.assertTrue(np.array_equal(array2, setitem_pp[0]), - msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0])) - self.assertTrue(np.array_equal(array3, setitem_pp[1]), - msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1])) + np.testing.assert_array_equal( + array2, + setitem_pp[0], + err_msg='\n numpy:{},\n paddle:{}'.format( + array2, setitem_pp[0])) + np.testing.assert_array_equal( + array3, + setitem_pp[1], + err_msg='\n numpy:{},\n paddle:{}'.format( + array3, setitem_pp[1])) array = array[0] index1 = index1[0] index2 = index2[0] @@ -1028,19 +1030,27 @@ class TestListIndex(unittest.TestCase): x2.name: array }, fetch_list=fetch_list) - self.assertTrue(np.array_equal(array2, setitem_pp[0]), - msg='\n numpy:{},\n paddle:{}'.format( - array2, setitem_pp[0])) - self.assertTrue(np.array_equal(array3, setitem_pp[1]), - msg='\n numpy:{},\n paddle:{}'.format( - array3, setitem_pp[1])) - - self.assertTrue(np.array_equal(y_np1, setitem_pp[2]), - msg='\n numpy:{},\n paddle:{}'.format( - y_np1, setitem_pp[2])) - self.assertTrue(np.array_equal(y_np2, setitem_pp[3]), - msg='\n numpy:{},\n paddle:{}'.format( - y_np2, setitem_pp[3])) + np.testing.assert_array_equal( + array2, + setitem_pp[0], + err_msg='\n numpy:{},\n paddle:{}'.format( + array2, setitem_pp[0])) + np.testing.assert_array_equal( + array3, + setitem_pp[1], + err_msg='\n numpy:{},\n paddle:{}'.format( + array3, setitem_pp[1])) + + np.testing.assert_array_equal( + y_np1, + setitem_pp[2], + err_msg='\n numpy:{},\n paddle:{}'.format( + y_np1, setitem_pp[2])) + np.testing.assert_array_equal( + y_np2, + setitem_pp[3], + err_msg='\n numpy:{},\n paddle:{}'.format( + y_np2, setitem_pp[3])) array = array[0] index1 = index1[0] index2 = index2[0] @@ -1069,26 +1079,30 @@ class TestListIndex(unittest.TestCase): y_t1 = tensor1[index_mod_t2, index_mod_t1] - self.assertTrue(np.array_equal(y_t1.numpy(), y_np1), - msg='\n numpy:{},\n paddle:{}'.format( - y_np1, y_t1.numpy())) + np.testing.assert_array_equal( + y_t1.numpy(), + y_np1, + err_msg='\n numpy:{},\n paddle:{}'.format(y_np1, y_t1.numpy())) # 1 dim getitem array2 = array.copy() y_np2 = array2[index_mod2] tensor2 = paddle.to_tensor(array) y_t2 = tensor2[index_mod_t2] - self.assertTrue(np.array_equal(y_t2.numpy(), y_np2), - msg='\n numpy:{},\n paddle:{}'.format( - y_np2, y_t2.numpy())) + np.testing.assert_array_equal( + y_t2.numpy(), + y_np2, + err_msg='\n numpy:{},\n paddle:{}'.format(y_np2, y_t2.numpy())) # 2 dim setitem array1 = array.copy() array1[index_mod1, index_mod2] = 1 tensor1[index_mod_t1, index_mod_t2] = 1 - self.assertTrue(np.array_equal(tensor1.numpy(), array1), - msg='\n numpy:{},\n paddle:{}'.format( - array1, tensor1.numpy())) + np.testing.assert_array_equal( + tensor1.numpy(), + array1, + err_msg='\n numpy:{},\n paddle:{}'.format( + array1, tensor1.numpy())) # 1 dim setitem array2 = array.copy() @@ -1096,9 +1110,11 @@ class TestListIndex(unittest.TestCase): tensor2[index_mod_t1] = 2.5 - self.assertTrue(np.array_equal(tensor2.numpy(), array2), - msg='\n numpy:{},\n paddle:{}'.format( - array2, tensor2.numpy())) + np.testing.assert_array_equal( + tensor2.numpy(), + array2, + err_msg='\n numpy:{},\n paddle:{}'.format( + array2, tensor2.numpy())) array = array[0] index1 = index1[0] diff --git a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py index a70d8e209b3..ab9fa633bd5 100644 --- 
a/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py +++ b/python/paddle/fluid/tests/unittests/test_view_op_reuse_allocation.py @@ -48,7 +48,7 @@ class TestDygraphViewReuseAllocation(unittest.TestCase): var_numpy = var.numpy().reshape(self.output_shape) view_var_numpy = view_var.numpy() - self.assertTrue(np.array_equal(var_numpy, view_var_numpy)) + np.testing.assert_array_equal(var_numpy, view_var_numpy) def test_view_api(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index 967f917fd93..83381ac9fcd 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -282,7 +282,7 @@ class TestWhereDygraphAPI(unittest.TestCase): result = paddle.where(cond, a, b) result = result.numpy() expect = np.where(cond, a, b) - self.assertTrue(np.array_equal(expect, result)) + np.testing.assert_array_equal(expect, result) def test_dygraph_api_broadcast_1(self): cond_shape = [2, 4] diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index 92d67406b03..d97722c0980 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -587,7 +587,7 @@ class TestApiWhileLoopSliceInBody(unittest.TestCase): np_x = np.array([1, 2, 3, 4, 5], dtype='int32') res = exe.run(main_program, feed={'x': np_x}, fetch_list=[z]) - self.assertTrue(np.array_equal(res[0], [np.sum(np_x)])) + np.testing.assert_array_equal(res[0], [np.sum(np_x)]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py index ebdebce2682..956100ea569 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_scale_op_xpu.py @@ -123,7 +123,7 @@ class TestScaleApiStatic(unittest.TestCase): exe = paddle.static.Executor(place=paddle.CPUPlace()) out = exe.run(main_prog, feed={"x": input}, fetch_list=[out]) - self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out[0], input * 2.0 + 3.0) class TestScaleInplaceApiStatic(TestScaleApiStatic): @@ -142,7 +142,7 @@ class TestScaleApiDygraph(unittest.TestCase): input = np.random.random([2, 25]).astype("float32") x = paddle.to_tensor(input) out = self._executed_api(x, scale=2.0, bias=3.0) - self.assertEqual(np.array_equal(out.numpy(), input * 2.0 + 3.0), True) + np.testing.assert_array_equal(out.numpy(), input * 2.0 + 3.0) paddle.enable_static() diff --git a/python/paddle/tests/test_dlpack.py b/python/paddle/tests/test_dlpack.py index 5ca49a09fe8..076fe5545db 100644 --- a/python/paddle/tests/test_dlpack.py +++ b/python/paddle/tests/test_dlpack.py @@ -33,9 +33,8 @@ class TestDLPack(unittest.TestCase): isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor)) else: self.assertTrue(isinstance(out_from_dlpack, paddle.Tensor)) - self.assertTrue( - np.array_equal(np.array(out_from_dlpack), - np.array([1, 2, 3, 4]).astype('int'))) + np.testing.assert_array_equal(np.array(out_from_dlpack), + np.array([1, 2, 3, 4]).astype('int')) def test_dlpack_dygraph(self): with _test_eager_guard(): @@ -64,9 +63,9 @@ class TestDLPack(unittest.TestCase): dlpack = paddle.utils.dlpack.to_dlpack(tensor) out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) 
self.assertTrue(isinstance(out_from_dlpack, fluid.core.Tensor)) - self.assertTrue( - np.array_equal(np.array(out_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int'))) + np.testing.assert_array_equal( + np.array(out_from_dlpack), + np.array([[1], [2], [3], [4]]).astype('int')) # when build with cuda if core.is_compiled_with_cuda(): @@ -76,9 +75,9 @@ class TestDLPack(unittest.TestCase): gdlpack = paddle.utils.dlpack.to_dlpack(gtensor) gout_from_dlpack = paddle.utils.dlpack.from_dlpack(gdlpack) self.assertTrue(isinstance(gout_from_dlpack, fluid.core.Tensor)) - self.assertTrue( - np.array_equal(np.array(gout_from_dlpack), - np.array([[1], [2], [3], [4]]).astype('int'))) + np.testing.assert_array_equal( + np.array(gout_from_dlpack), + np.array([[1], [2], [3], [4]]).astype('int')) def func_test_dlpack_dtype_conversion(self): paddle.disable_static() diff --git a/python/paddle/tests/test_hapi_amp.py b/python/paddle/tests/test_hapi_amp.py index 24df22ab5ea..900a8a0fcc2 100644 --- a/python/paddle/tests/test_hapi_amp.py +++ b/python/paddle/tests/test_hapi_amp.py @@ -127,12 +127,9 @@ class TestHapiWithAmp(unittest.TestCase): model._scaler.state_dict()['incr_count']) self.assertEqual(new_model._scaler.state_dict()['decr_count'], model._scaler.state_dict()['decr_count']) - self.assertTrue( - np.array_equal( - new_model._optimizer.state_dict() - ['conv2d_1.w_0_moment1_0'].numpy(), - model._optimizer.state_dict() - ['conv2d_1.w_0_moment1_0'].numpy())) + np.testing.assert_array_equal( + new_model._optimizer.state_dict()['conv2d_1.w_0_moment1_0'].numpy(), + model._optimizer.state_dict()['conv2d_1.w_0_moment1_0'].numpy()) def test_dynamic_check_input(self): paddle.disable_static() -- GitLab