From 2de0d6769469532cf92dbee33e79041d60ae8fc1 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Wed, 17 Aug 2022 16:56:30 +0800 Subject: [PATCH] [CodeStyle][NPU] use np.testing.assert_allclose instead of self.assertTrue(np.allclose(...)) (part 1) (#44988) * autofix * try resolve precision issues * revert some changes * clean some `err_msg` * 0.0001 -> 1e-4 * update commented assert code * try to fix some shape errors * `numpy` -> `np` * empty commit, trigger kunlun ci, test=kunlun * empty commit, retrigger kunlun ci, test=kunlun * empty commit, trigger kunlun ci, try fix npu memcpy_h2d, test=kunlun * try fix npu import error, test=kunlun --- .../test_weight_quantization_mobilenetv1.py | 14 +- .../fluid/contrib/tests/test_correlation.py | 4 +- .../contrib/tests/test_model_cast_to_bf16.py | 10 +- .../tests/test_multi_precision_fp16_train.py | 24 +- .../contrib/tests/test_weight_decay_extend.py | 9 +- .../custom_op/test_custom_tanh_double_grad.py | 28 +- python/paddle/fluid/tests/test_if_else_op.py | 9 +- .../paddle/fluid/tests/unittests/benchmark.py | 7 +- .../distributed_passes/dist_pass_test_base.py | 11 +- .../test_basic_api_transformation.py | 12 +- .../unittests/dygraph_to_static/test_bert.py | 32 +- .../unittests/dygraph_to_static/test_bmn.py | 44 +- .../dygraph_to_static/test_break_continue.py | 9 +- .../dygraph_to_static/test_cache_program.py | 17 +- .../unittests/dygraph_to_static/test_cast.py | 16 +- .../dygraph_to_static/test_container.py | 17 +- .../dygraph_to_static/test_convert_call.py | 13 +- .../test_cpu_cuda_to_tensor.py | 19 +- .../dygraph_to_static/test_declarative.py | 18 +- .../unittests/dygraph_to_static/test_dict.py | 9 +- .../dygraph_to_static/test_drop_path.py | 4 +- .../dygraph_to_static/test_fetch_feed.py | 9 +- .../dygraph_to_static/test_for_enumerate.py | 2 +- .../dygraph_to_static/test_full_name_usage.py | 14 +- .../unittests/dygraph_to_static/test_grad.py | 6 +- .../test_gradient_aggregation.py | 7 +- .../dygraph_to_static/test_ifelse.py | 4 +- .../dygraph_to_static/test_isinstance.py | 7 +- .../unittests/dygraph_to_static/test_lac.py | 17 +- .../dygraph_to_static/test_layer_hook.py | 17 +- .../unittests/dygraph_to_static/test_len.py | 2 +- .../unittests/dygraph_to_static/test_list.py | 8 +- .../dygraph_to_static/test_logical.py | 18 +- .../unittests/dygraph_to_static/test_loop.py | 6 +- .../unittests/dygraph_to_static/test_lstm.py | 35 +- .../dygraph_to_static/test_mnist_amp.py | 10 +- .../dygraph_to_static/test_mnist_pure_fp16.py | 10 +- .../dygraph_to_static/test_param_guard.py | 12 +- .../dygraph_to_static/test_partial_program.py | 6 +- .../test_program_translator.py | 20 +- .../dygraph_to_static/test_ptb_lm.py | 12 +- .../dygraph_to_static/test_ptb_lm_v2.py | 12 +- .../test_reinforcement_learning.py | 4 +- .../dygraph_to_static/test_return.py | 11 +- .../test_save_inference_model.py | 2 +- .../dygraph_to_static/test_save_load.py | 8 +- .../dygraph_to_static/test_se_resnet.py | 49 +- .../dygraph_to_static/test_sentiment.py | 8 +- .../unittests/dygraph_to_static/test_slice.py | 10 +- .../dygraph_to_static/test_tensor_methods.py | 6 +- .../dygraph_to_static/test_tensor_shape.py | 22 +- .../dygraph_to_static/test_transformer.py | 12 +- .../unittests/dygraph_to_static/test_tsm.py | 4 +- .../dygraph_to_static/test_typing.py | 2 +- .../dygraph_to_static/test_word2vec.py | 4 +- .../dygraph_to_static/test_yolov3.py | 10 +- .../fluid/tests/unittests/fft/test_fft.py | 94 ++- .../custom_ops/test_custom_leaky_relu_ipu.py | 6 +- 
.../unittests/ipu/test_dy2static_fp16_ipu.py | 2 +- .../tests/unittests/ipu/test_dy2static_ipu.py | 2 +- .../unittests/ipu/test_eval_model_ipu.py | 5 +- .../unittests/ipu/test_identity_loss_ipu.py | 5 +- .../ipu/test_inference_model_io_ipu.py | 2 +- .../unittests/ipu/test_ipu_shard_api_ipu.py | 12 +- .../unittests/ipu/test_lr_sheduler_ipu.py | 2 +- .../unittests/ipu/test_matmul_serilize_ipu.py | 6 +- .../unittests/ipu/test_model_parallel_ipu.py | 20 +- .../tests/unittests/ipu/test_optimizer_ipu.py | 5 +- .../tests/unittests/ipu/test_print_op_ipu.py | 2 +- .../tests/unittests/ipu/test_save_load_ipu.py | 12 +- .../unittests/ipu/test_set_ipu_shard_api.py | 30 +- .../unittests/ipu/test_varname_inplace_ipu.py | 6 +- .../unittests/ipu/test_weight_decay_ipu.py | 5 +- .../unittests/ipu/test_weight_sharing_ipu.py | 6 +- .../unittests/ir/inference/auto_scan_test.py | 9 +- .../ir/inference/inference_pass_test.py | 31 +- .../ir/inference/quant_dequant_test.py | 31 +- .../test_trt_convert_multiclass_nms3.py | 9 +- .../unittests/ir/test_fuse_resnet_unit.py | 5 +- .../unittests/ir/test_ir_generate_pass.py | 10 +- .../mkldnn/test_layer_norm_bf16_mkldnn_op.py | 7 +- .../mkldnn/test_layer_norm_mkldnn_op.py | 6 +- .../mkldnn/test_requantize_mkldnn_op.py | 6 +- .../unittests/mkldnn/test_sum_mkldnn_op.py | 10 +- .../unittests/mlu/test_arg_max_op_mlu.py | 15 +- .../mlu/test_batch_norm_op_mlu_v2.py | 20 +- .../mlu/test_gaussian_random_op_mlu.py | 7 +- .../unittests/mlu/test_momentum_op_mlu.py | 18 +- .../unittests/mlu/test_softmax_op_mlu.py | 8 +- .../mlu/test_sync_batch_norm_op_mlu_extra.py | 10 +- .../mlu/test_uniform_random_op_mlu.py | 12 +- .../unittests/npu/test_arg_max_op_npu.py | 15 +- .../unittests/npu/test_arg_min_op_npu.py | 10 +- .../unittests/npu/test_assign_value_op_npu.py | 5 +- .../npu/test_elementwise_mod_op_npu.py | 4 +- .../unittests/npu/test_multinomial_op_npu.py | 28 +- .../npu/test_take_along_axis_op_npu.py | 4 +- .../npu/test_uniform_random_op_npu.py | 12 +- .../paddle/fluid/tests/unittests/op_test.py | 28 +- .../tests/unittests/test_activation_op.py | 146 ++--- .../unittests/test_activation_sparse_op.py | 2 +- .../fluid/tests/unittests/test_adam_op.py | 31 +- .../unittests/test_adaptive_avg_pool1d.py | 8 +- .../unittests/test_adaptive_max_pool1d.py | 6 +- .../test_add_position_encoding_op.py | 4 +- .../fluid/tests/unittests/test_addmm_op.py | 12 +- .../fluid/tests/unittests/test_angle_op.py | 4 +- .../fluid/tests/unittests/test_argsort_op.py | 7 +- .../unittests/test_array_read_write_op.py | 18 +- .../fluid/tests/unittests/test_assign_op.py | 24 +- .../tests/unittests/test_assign_value_op.py | 5 +- .../fluid/tests/unittests/test_atan2_op.py | 4 +- .../fluid/tests/unittests/test_base_layer.py | 38 +- .../tests/unittests/test_basic_gru_api.py | 8 +- .../tests/unittests/test_basic_gru_unit_op.py | 2 +- .../tests/unittests/test_basic_lstm_api.py | 14 +- .../unittests/test_basic_lstm_unit_op.py | 12 +- .../tests/unittests/test_batch_norm_op.py | 10 +- .../tests/unittests/test_batch_norm_op_v2.py | 50 +- .../tests/unittests/test_beam_search_op.py | 14 +- .../tests/unittests/test_bernoulli_op.py | 9 +- .../paddle/fluid/tests/unittests/test_bfgs.py | 19 +- .../tests/unittests/test_bicubic_interp_op.py | 4 +- .../unittests/test_bicubic_interp_v2_op.py | 4 +- .../unittests/test_bilinear_interp_op.py | 2 +- .../unittests/test_bilinear_interp_v2_op.py | 10 +- .../fluid/tests/unittests/test_bmm_op.py | 4 +- .../tests/unittests/test_box_coder_op.py | 6 +- .../tests/unittests/test_bucketize_api.py | 8 +- 
.../paddle/fluid/tests/unittests/test_case.py | 26 +- .../tests/unittests/test_channel_shuffle.py | 6 +- .../tests/unittests/test_cholesky_solve_op.py | 6 +- .../fluid/tests/unittests/test_chunk_op.py | 30 +- .../tests/unittests/test_clip_by_norm_op.py | 10 +- .../fluid/tests/unittests/test_clip_op.py | 64 +- .../unittests/test_collective_api_base.py | 116 ++-- .../tests/unittests/test_collective_base.py | 90 +-- .../fluid/tests/unittests/test_complex_abs.py | 2 +- .../tests/unittests/test_complex_cast.py | 42 +- .../test_complex_elementwise_layers.py | 9 +- .../tests/unittests/test_complex_kron.py | 4 +- .../tests/unittests/test_complex_matmul.py | 18 +- .../fluid/tests/unittests/test_complex_op.py | 4 +- .../tests/unittests/test_complex_reshape.py | 8 +- .../tests/unittests/test_complex_sum_layer.py | 2 +- .../unittests/test_complex_trace_layer.py | 2 +- .../tests/unittests/test_complex_transpose.py | 2 +- .../tests/unittests/test_complex_variable.py | 2 +- .../tests/unittests/test_complex_view_op.py | 8 +- .../paddle/fluid/tests/unittests/test_cond.py | 7 +- .../tests/unittests/test_conv2d_layer.py | 4 +- .../tests/unittests/test_conv3d_layer.py | 4 +- .../paddle/fluid/tests/unittests/test_corr.py | 20 +- .../unittests/test_cosine_embedding_loss.py | 18 +- .../unittests/test_cosine_similarity_api.py | 10 +- .../tests/unittests/test_count_nonzero_api.py | 4 +- .../paddle/fluid/tests/unittests/test_cov.py | 20 +- .../unittests/test_cross_entropy_loss.py | 140 ++--- .../fluid/tests/unittests/test_cross_op.py | 8 +- .../tests/unittests/test_cuda_random_seed.py | 12 +- .../tests/unittests/test_cudnn_grucell.py | 16 +- .../tests/unittests/test_cudnn_lstmcell.py | 72 +-- .../fluid/tests/unittests/test_cumprod_op.py | 4 +- .../fluid/tests/unittests/test_cumsum_op.py | 8 +- .../tests/unittests/test_custom_grad_input.py | 21 +- .../tests/unittests/test_data_norm_op.py | 6 +- .../fluid/tests/unittests/test_deg2rad.py | 4 +- .../tests/unittests/test_determinant_op.py | 8 +- .../tests/unittests/test_dgc_momentum_op.py | 11 +- .../fluid/tests/unittests/test_dgc_op.py | 11 +- .../fluid/tests/unittests/test_diag_embed.py | 4 +- .../fluid/tests/unittests/test_diag_v2.py | 52 +- .../fluid/tests/unittests/test_diagflat.py | 16 +- .../fluid/tests/unittests/test_diagonal_op.py | 12 +- .../fluid/tests/unittests/test_digamma_op.py | 5 +- .../fluid/tests/unittests/test_dist_op.py | 2 +- .../fluid/tests/unittests/test_dist_train.py | 4 +- .../test_distribute_fpn_proposals_op.py | 2 +- .../fluid/tests/unittests/test_dot_op.py | 5 +- .../tests/unittests/test_dropout_nd_op.py | 2 +- .../fluid/tests/unittests/test_dropout_op.py | 82 +-- .../unittests/test_dygraph_multi_forward.py | 10 +- .../unittests/test_dygraph_spectral_norm.py | 6 +- .../unittests/test_dygraph_weight_norm.py | 68 ++- .../unittests/test_dynrnn_gradient_check.py | 50 +- .../unittests/test_dynrnn_static_input.py | 15 +- .../test_eager_deletion_padding_rnn.py | 3 +- .../test_eager_deletion_recurrent_op.py | 20 +- .../unittests/test_egr_code_generate_api.py | 4 +- .../fluid/tests/unittests/test_eig_op.py | 99 ++-- .../fluid/tests/unittests/test_einsum.py | 11 +- .../fluid/tests/unittests/test_einsum_v2.py | 11 +- .../unittests/test_elementwise_gradient_op.py | 6 +- .../test_elementwise_heaviside_op.py | 8 +- .../unittests/test_elementwise_min_op.py | 12 +- .../unittests/test_elementwise_mod_op.py | 4 +- .../paddle/fluid/tests/unittests/test_ema.py | 2 +- .../fluid/tests/unittests/test_ema_fleet.py | 2 +- .../fluid/tests/unittests/test_erf_op.py | 2 
+- .../fluid/tests/unittests/test_erfinv_op.py | 6 +- .../tests/unittests/test_executor_and_mul.py | 14 +- .../test_executor_and_use_program_cache.py | 27 +- .../tests/unittests/test_exponential_op.py | 45 +- .../unittests/test_faster_tokenizer_op.py | 64 +- .../unittests/test_fetch_lod_tensor_array.py | 2 +- .../fluid/tests/unittests/test_fetch_var.py | 5 +- .../test_fleet_exe_dist_model_run.py | 4 +- .../tests/unittests/test_fleet_executor.py | 4 +- .../test_fleet_executor_origin_scheduler.py | 4 +- .../test_fleet_executor_with_task_nodes.py | 4 +- .../fluid/tests/unittests/test_fmax_op.py | 16 +- .../fluid/tests/unittests/test_fmin_op.py | 16 +- .../fluid/tests/unittests/test_fold_op.py | 4 +- .../unittests/test_fused_feedforward_op.py | 6 +- .../unittests/test_fused_gemm_epilogue_op.py | 10 +- .../test_fused_transformer_encoder_layer.py | 9 +- .../fluid/tests/unittests/test_gather_op.py | 8 +- .../unittests/test_gaussian_random_op.py | 10 +- .../paddle/fluid/tests/unittests/test_gcd.py | 5 +- .../fluid/tests/unittests/test_gelu_op.py | 12 +- .../test_generate_proposals_v2_op.py | 13 +- .../unittests/test_grad_clip_minimize.py | 514 ++++++++-------- .../tests/unittests/test_gradient_clip.py | 34 +- .../tests/unittests/test_graph_reindex.py | 64 +- .../unittests/test_graph_send_recv_op.py | 58 +- .../tests/unittests/test_group_norm_op.py | 4 +- .../tests/unittests/test_group_norm_op_v2.py | 2 +- .../tests/unittests/test_gumbel_softmax_op.py | 6 +- .../unittests/test_hinge_embedding_loss.py | 16 +- .../fluid/tests/unittests/test_hsigmoid_op.py | 6 +- .../tests/unittests/test_identity_loss_op.py | 5 +- .../fluid/tests/unittests/test_identity_op.py | 4 +- .../test_imperative_auto_mixed_precision.py | 84 ++- ...perative_auto_mixed_precision_for_eager.py | 84 ++- .../tests/unittests/test_imperative_basic.py | 44 +- .../unittests/test_imperative_double_grad.py | 22 +- .../tests/unittests/test_imperative_gan.py | 4 +- .../tests/unittests/test_imperative_gnn.py | 8 +- ..._imperative_lod_tensor_to_selected_rows.py | 5 +- .../tests/unittests/test_imperative_mnist.py | 15 +- .../test_imperative_mnist_sorted_gradient.py | 15 +- .../test_imperative_ocr_attention_model.py | 15 +- .../unittests/test_imperative_optimizer.py | 51 +- .../unittests/test_imperative_optimizer_v2.py | 42 +- .../test_imperative_reinforcement.py | 2 +- .../tests/unittests/test_imperative_resnet.py | 10 +- .../test_imperative_resnet_sorted_gradient.py | 10 +- .../unittests/test_imperative_se_resnext.py | 38 +- .../test_imperative_static_runner_mnist.py | 16 +- .../test_imperative_static_runner_while.py | 7 +- ..._imperative_transformer_sorted_gradient.py | 22 +- .../unittests/test_imperative_triple_grad.py | 12 +- .../tests/unittests/test_index_select_op.py | 8 +- .../fluid/tests/unittests/test_initializer.py | 53 +- .../fluid/tests/unittests/test_inner.py | 16 +- .../tests/unittests/test_inplace_abn_op.py | 15 +- .../fluid/tests/unittests/test_input_spec.py | 14 +- .../tests/unittests/test_instance_norm_op.py | 16 +- .../unittests/test_instance_norm_op_v2.py | 4 +- .../fluid/tests/unittests/test_inverse_op.py | 9 +- .../test_ir_memory_optimize_ifelse_op.py | 4 +- .../fluid/tests/unittests/test_jit_layer.py | 4 +- .../tests/unittests/test_jit_save_load.py | 8 +- .../tests/unittests/test_kldiv_loss_op.py | 2 +- .../fluid/tests/unittests/test_kthvalue_op.py | 14 +- .../fluid/tests/unittests/test_l1_loss.py | 24 +- .../fluid/tests/unittests/test_lambv2_op.py | 2 +- .../tests/unittests/test_layer_norm_op.py | 2 +- 
.../tests/unittests/test_layer_norm_op_v2.py | 14 +- .../fluid/tests/unittests/test_layers.py | 136 +++-- .../fluid/tests/unittests/test_lbfgs.py | 10 +- .../paddle/fluid/tests/unittests/test_lcm.py | 5 +- .../unittests/test_learning_rate_scheduler.py | 5 +- .../fluid/tests/unittests/test_lerp_op.py | 10 +- .../fluid/tests/unittests/test_lgamma_op.py | 2 +- .../fluid/tests/unittests/test_linear.py | 4 +- .../tests/unittests/test_linear_interp_op.py | 4 +- .../unittests/test_linear_interp_v2_op.py | 4 +- .../unittests/test_lod_tensor_array_ops.py | 68 +-- .../fluid/tests/unittests/test_log_softmax.py | 8 +- .../tests/unittests/test_logcumsumexp_op.py | 18 +- .../fluid/tests/unittests/test_logit_op.py | 4 +- .../fluid/tests/unittests/test_logsumexp.py | 6 +- .../fluid/tests/unittests/test_lrn_op.py | 19 +- .../fluid/tests/unittests/test_lu_op.py | 13 +- .../tests/unittests/test_lu_unpack_op.py | 21 +- .../fluid/tests/unittests/test_manual_seed.py | 4 +- .../unittests/test_math_op_patch_var_base.py | 15 +- .../fluid/tests/unittests/test_matmul_op.py | 14 +- .../tests/unittests/test_matrix_power_op.py | 11 +- .../tests/unittests/test_matrix_rank_op.py | 12 +- .../unittests/test_max_min_amax_amin_op.py | 6 +- .../fluid/tests/unittests/test_maximum_op.py | 16 +- .../fluid/tests/unittests/test_maxout_op.py | 10 +- .../fluid/tests/unittests/test_mean_op.py | 11 +- .../fluid/tests/unittests/test_memcpy_op.py | 8 +- .../tests/unittests/test_merged_adam_op.py | 6 +- .../unittests/test_merged_momentum_op.py | 6 +- .../fluid/tests/unittests/test_minimum_op.py | 16 +- .../fluid/tests/unittests/test_mode_op.py | 10 +- .../fluid/tests/unittests/test_momentum_op.py | 18 +- .../fluid/tests/unittests/test_mse_loss.py | 2 +- .../tests/unittests/test_multiclass_nms_op.py | 2 +- .../tests/unittests/test_multinomial_op.py | 36 +- .../fluid/tests/unittests/test_multiply.py | 16 +- .../fluid/tests/unittests/test_mv_op.py | 4 +- .../fluid/tests/unittests/test_nanmean_api.py | 9 +- .../fluid/tests/unittests/test_nanmedian.py | 17 +- .../tests/unittests/test_nearest_interp_op.py | 7 +- .../unittests/test_nearest_interp_v2_op.py | 20 +- .../fluid/tests/unittests/test_neg_op.py | 6 +- .../tests/unittests/test_nn_dice_loss.py | 2 +- .../unittests/test_nn_margin_rank_loss.py | 10 +- .../test_nn_quant_functional_layers.py | 2 +- .../tests/unittests/test_nn_sigmoid_op.py | 8 +- .../fluid/tests/unittests/test_nonzero_api.py | 8 +- .../fluid/tests/unittests/test_normalize.py | 16 +- .../tests/unittests/test_npair_loss_op.py | 6 +- .../unittests/test_optimizer_for_varbase.py | 8 +- .../test_optimizer_in_control_flow.py | 14 +- .../fluid/tests/unittests/test_outer.py | 16 +- .../fluid/tests/unittests/test_pad3d_op.py | 66 +-- .../test_paddle_imperative_double_grad.py | 22 +- .../tests/unittests/test_pairwise_distance.py | 69 ++- .../unittests/test_parallel_executor_mnist.py | 3 +- .../test_parallel_executor_run_cinn.py | 5 +- ...test_parallel_executor_test_while_train.py | 8 +- .../tests/unittests/test_pixel_shuffle.py | 6 +- .../tests/unittests/test_pixel_unshuffle.py | 6 +- .../fluid/tests/unittests/test_poisson_op.py | 3 +- .../fluid/tests/unittests/test_pool1d_api.py | 24 +- .../fluid/tests/unittests/test_pool2d_api.py | 51 +- .../fluid/tests/unittests/test_pool2d_op.py | 6 +- .../fluid/tests/unittests/test_pool3d_api.py | 51 +- .../paddle/fluid/tests/unittests/test_pow.py | 32 +- .../fluid/tests/unittests/test_prod_op.py | 58 +- .../fluid/tests/unittests/test_prune.py | 2 +- .../tests/unittests/test_psroi_pool_op.py | 12 
+- .../tests/unittests/test_put_along_axis_op.py | 8 +- .../test_python_bf16_numpy_datatype.py | 2 +- .../fluid/tests/unittests/test_qr_op.py | 24 +- .../test_quantile_and_nanquantile.py | 556 +++++++++--------- .../fluid/tests/unittests/test_rad2deg.py | 6 +- .../fluid/tests/unittests/test_randint_op.py | 9 +- .../fluid/tests/unittests/test_random_seed.py | 56 +- .../tests/unittests/test_recurrent_op.py | 16 +- .../fluid/tests/unittests/test_reduce_op.py | 12 +- .../fluid/tests/unittests/test_regularizer.py | 18 +- .../tests/unittests/test_regularizer_api.py | 18 +- .../fluid/tests/unittests/test_renorm_op.py | 10 +- .../unittests/test_reorder_lod_tensor.py | 39 +- .../unittests/test_repeat_interleave_op.py | 16 +- .../fluid/tests/unittests/test_reshape_op.py | 6 +- .../unittests/test_resnet50_with_cinn.py | 2 +- .../fluid/tests/unittests/test_rmsprop_op.py | 11 +- .../tests/unittests/test_rnn_cell_api.py | 6 +- .../tests/unittests/test_rnn_decode_api.py | 8 +- .../unittests/test_rnn_memory_helper_op.py | 9 +- .../fluid/tests/unittests/test_roll_op.py | 14 +- .../tests/unittests/test_run_program_op.py | 10 +- .../fluid/tests/unittests/test_segment_ops.py | 11 +- .../unittests/test_select_input_output_op.py | 8 +- .../fluid/tests/unittests/test_selu_op.py | 6 +- .../fluid/tests/unittests/test_sgd_op.py | 50 +- .../tests/unittests/test_share_data_op.py | 4 +- .../tests/unittests/test_shrink_rnn_memory.py | 12 +- .../fluid/tests/unittests/test_signal.py | 68 +-- .../fluid/tests/unittests/test_slice_op.py | 4 +- .../unittests/test_softmax_mask_fuse_op.py | 4 +- ...est_softmax_mask_fuse_upper_triangle_op.py | 4 +- .../fluid/tests/unittests/test_softmax_op.py | 8 +- .../fluid/tests/unittests/test_solve_op.py | 46 +- .../tests/unittests/test_sparse_addmm_op.py | 20 +- .../unittests/test_sparse_attention_op.py | 12 +- .../unittests/test_sparse_elementwise_op.py | 48 +- .../test_sparse_fused_attention_op.py | 18 +- .../tests/unittests/test_sparse_matmul_op.py | 47 +- .../tests/unittests/test_sparse_mv_op.py | 30 +- .../tests/unittests/test_sparse_softmax_op.py | 30 +- .../tests/unittests/test_sparse_unary_op.py | 9 +- .../test_split_and_merge_lod_tensor_op.py | 4 +- .../fluid/tests/unittests/test_split_op.py | 60 +- .../fluid/tests/unittests/test_squeeze_op.py | 12 +- .../fluid/tests/unittests/test_stack_op.py | 6 +- .../fluid/tests/unittests/test_std_layer.py | 6 +- .../fluid/tests/unittests/test_subtract_op.py | 16 +- .../fluid/tests/unittests/test_sum_op.py | 6 +- .../fluid/tests/unittests/test_svd_op.py | 4 +- .../fluid/tests/unittests/test_switch_case.py | 66 ++- .../unittests/test_sync_batch_norm_op.py | 22 +- .../unittests/test_take_along_axis_op.py | 4 +- .../fluid/tests/unittests/test_tensor_uva.py | 4 +- .../fluid/tests/unittests/test_tf32_cublas.py | 2 +- .../fluid/tests/unittests/test_top_k_v2_op.py | 116 ++-- .../fluid/tests/unittests/test_trace_op.py | 4 +- .../unittests/test_triangular_solve_op.py | 4 +- .../tests/unittests/test_tril_triu_op.py | 8 +- .../unittests/test_trilinear_interp_op.py | 7 +- .../unittests/test_trilinear_interp_v2_op.py | 7 +- .../unittests/test_triplet_margin_loss.py | 86 ++- .../test_triplet_margin_with_distance_loss.py | 69 ++- .../fluid/tests/unittests/test_trunc_op.py | 6 +- .../fluid/tests/unittests/test_unfold_op.py | 4 +- .../unittests/test_uniform_random_bf16_op.py | 15 +- .../test_uniform_random_inplace_op.py | 2 +- .../tests/unittests/test_uniform_random_op.py | 37 +- .../fluid/tests/unittests/test_unique.py | 6 +- 
.../fluid/tests/unittests/test_unpool1d_op.py | 19 +- .../fluid/tests/unittests/test_unpool3d_op.py | 19 +- .../fluid/tests/unittests/test_unpool_op.py | 8 +- .../tests/unittests/test_unsqueeze_op.py | 4 +- .../fluid/tests/unittests/test_var_base.py | 4 +- .../tests/unittests/test_variance_layer.py | 6 +- .../fluid/tests/unittests/test_warpctc_op.py | 9 +- .../unittests/test_weight_normalization.py | 31 +- .../fluid/tests/unittests/test_where_op.py | 4 +- .../tests/unittests/test_while_loop_op.py | 55 +- .../fluid/tests/unittests/test_zeropad2d.py | 16 +- .../unittests/xpu/test_arg_max_op_xpu.py | 10 +- .../unittests/xpu/test_assign_value_op_xpu.py | 5 +- .../unittests/xpu/test_batch_norm_op_xpu.py | 12 +- .../xpu/test_elementwise_mod_op_xpu.py | 4 +- .../xpu/test_gaussian_random_op_xpu.py | 7 +- .../tests/unittests/xpu/test_matmul_op_xpu.py | 6 +- python/paddle/tests/test_async_read_write.py | 17 +- python/paddle/tests/test_dlpack.py | 6 +- 426 files changed, 4459 insertions(+), 3627 deletions(-) diff --git a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py index cbe0326c46a..e6af06c6cea 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py +++ b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py @@ -123,13 +123,13 @@ class TestWeightQuantization(unittest.TestCase): res_fp16 = self.run_models(save_model_dir, model_filename, params_filename, input_data, True) - self.assertTrue( - np.allclose(res_fp32, - res_fp16, - rtol=1e-5, - atol=1e-08, - equal_nan=True), - msg='Failed to test the accuracy of the fp32 and fp16 model.') + np.testing.assert_allclose( + res_fp32, + res_fp16, + rtol=1e-05, + atol=1e-08, + equal_nan=True, + err_msg='Failed to test the accuracy of the fp32 and fp16 model.') try: os.system("rm -rf {}".format(save_model_dir)) diff --git a/python/paddle/fluid/contrib/tests/test_correlation.py b/python/paddle/fluid/contrib/tests/test_correlation.py index c98cbd1dd93..d3ab5dff601 100644 --- a/python/paddle/fluid/contrib/tests/test_correlation.py +++ b/python/paddle/fluid/contrib/tests/test_correlation.py @@ -114,7 +114,7 @@ class TestCorrelationOp(unittest.TestCase): }, fetch_list=[out.name, loss.name]) - self.assertTrue(np.allclose(res[0], out_np)) + np.testing.assert_allclose(res[0], out_np, rtol=1e-05, atol=1e-8) class Net(fluid.dygraph.Layer): @@ -159,7 +159,7 @@ class TestCorrelationOpDyGraph(unittest.TestCase): corr_pd = Net('corr_pd') y = corr_pd(x1, x2) out = y.numpy() - self.assertTrue(np.allclose(out, out_np)) + np.testing.assert_allclose(out, out_np, rtol=1e-05, atol=1e-8) if __name__ == '__main__': diff --git a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py index 4682be8114a..07adb908526 100644 --- a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py +++ b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py @@ -132,10 +132,12 @@ class TestModelCastBF16(unittest.TestCase): amp_fun=_amp_fun, startup_prog=startup_prog) - self.assertTrue( - np.allclose(cutf(static_ret_bf16), cutf(static_ret), 1e-2)) - self.assertTrue( - np.allclose(cutf(static_ret_bf16), cutf(ret_fp32bf16), 1e-2)) + np.testing.assert_allclose(cutf(static_ret_bf16), + cutf(static_ret), + rtol=0.01) + np.testing.assert_allclose(cutf(static_ret_bf16), + cutf(ret_fp32bf16), + rtol=0.01) with self.static_graph(): t = layers.data(name='t', 
shape=[size, size], dtype='float32') diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py index c062a039f28..bd229494662 100644 --- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py +++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py @@ -228,18 +228,18 @@ class TestImageMultiPrecision(unittest.TestCase): use_nesterov=use_nesterov, optimizer=optimizer) - self.assertTrue(np.allclose(np.array(train_loss_fp16), - np.array(train_loss_fp32), - rtol=1e-02, - atol=1e-05, - equal_nan=True), - msg='Failed to train in pure FP16.') - self.assertTrue(np.allclose(np.array(test_loss_fp16), - np.array(test_loss_fp32), - rtol=1e-02, - atol=1e-05, - equal_nan=True), - msg='Failed to test in pure FP16.') + np.testing.assert_allclose(np.array(train_loss_fp16), + np.array(train_loss_fp32), + rtol=0.01, + atol=1e-05, + equal_nan=True, + err_msg='Failed to train in pure FP16.') + np.testing.assert_allclose(np.array(test_loss_fp16), + np.array(test_loss_fp32), + rtol=0.01, + atol=1e-05, + equal_nan=True, + err_msg='Failed to test in pure FP16.') do_test(use_nesterov=False) do_test(use_nesterov=True) diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py index 4bb1ed72b7b..0b0186e24ae 100644 --- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py +++ b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py @@ -185,9 +185,12 @@ class TestWeightDecay(unittest.TestCase): param_sum2 = self.check_weight_decay2(place, model) for i in range(len(param_sum1)): - self.assertTrue( - np.allclose(param_sum1[i], param_sum2[i]), - "Current place: {}, i: {}, sum1: {}, sum2: {}".format( + np.testing.assert_allclose( + param_sum1[i], + param_sum2[i], + rtol=1e-05, + err_msg='Current place: {}, i: {}, sum1: {}, sum2: {}'. + format( place, i, param_sum1[i] [~np.isclose(param_sum1[i], param_sum2[i])], param_sum2[i] diff --git a/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py b/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py index 56093767993..b36c99d1dfa 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_tanh_double_grad.py @@ -77,18 +77,24 @@ class TestCustomTanhDoubleGradJit(unittest.TestCase): custom_ops.custom_tanh, device, dtype, x) pd_out, pd_dx_grad, pd_dout = custom_tanh_double_grad_dynamic( paddle.tanh, device, dtype, x) - self.assertTrue( - np.allclose(out, pd_out), - "custom op out: {},\n paddle api out: {}".format( + np.testing.assert_allclose( + out, + pd_out, + rtol=1e-05, + err_msg='custom op out: {},\n paddle api out: {}'.format( out, pd_out)) - self.assertTrue( - np.allclose(dx_grad, pd_dx_grad), - "custom op dx grad: {},\n paddle api dx grad: {}".format( - dx_grad, pd_dx_grad)) - self.assertTrue( - np.allclose(dout, pd_dout), - "custom op out grad: {},\n paddle api out grad: {}".format( - dout, pd_dout)) + np.testing.assert_allclose( + dx_grad, + pd_dx_grad, + rtol=1e-05, + err_msg='custom op dx grad: {},\n paddle api dx grad: {}'. + format(dx_grad, pd_dx_grad)) + np.testing.assert_allclose( + dout, + pd_dout, + rtol=1e-05, + err_msg='custom op out grad: {},\n paddle api out grad: {}'. 
+ format(dout, pd_dout)) def test_func_double_grad_dynamic(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/test_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py index 4e140032dd8..19a33cc1e85 100644 --- a/python/paddle/fluid/tests/test_if_else_op.py +++ b/python/paddle/fluid/tests/test_if_else_op.py @@ -201,9 +201,12 @@ class TestIfElse(unittest.TestCase): fetch_list=[out]) o2 = self.numpy_cal() - self.assertTrue( - np.allclose(o1, o2, atol=1e-8), - "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2)) + np.testing.assert_allclose( + o1, + o2, + rtol=1e-05, + atol=1e-08, + ) def test_cpu(self): self.compare_ifelse_op_and_numpy(fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/benchmark.py b/python/paddle/fluid/tests/unittests/benchmark.py index 14479e7a271..d8bfaecfd78 100644 --- a/python/paddle/fluid/tests/unittests/benchmark.py +++ b/python/paddle/fluid/tests/unittests/benchmark.py @@ -46,9 +46,10 @@ class BenchmarkSuite(OpTest): actual_t = np.array(item_gpu_out) var_name = variable if isinstance( variable, six.string_types) else variable.name - self.assertTrue( - np.allclose(actual_t, expect_t, atol=atol), "Output (" + - var_name + ") has diff" + str(actual_t) + "\n" + str(expect_t)) + np.testing.assert_allclose(actual_t, + expect_t, + rtol=1e-05, + atol=atol) self.assertListEqual(actual.lod(), expect.lod(), "Output (" + var_name + ") has different lod") diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py index f13439575c9..4f89b69a50b 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py @@ -105,12 +105,11 @@ class DistPassTestBase(unittest.TestCase): if out_var_no_pass is None: self.assertTrue(out_var_pass is None) else: - self.assertTrue( - np.allclose(out_var_no_pass, - out_var_pass, - rtol=self.rtol, - atol=self.atol, - equal_nan=self.equal_nan)) + np.testing.assert_allclose(out_var_no_pass, + out_var_pass, + rtol=self.rtol, + atol=self.atol, + equal_nan=self.equal_nan) @classmethod def _to_var_names(cls, names_or_vars): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py index b818ed95a24..b39f2e39430 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_basic_api_transformation.py @@ -104,9 +104,7 @@ class TestDygraphBasicApi_ToVariable(unittest.TestCase): self.dygraph_func = func dygraph_res = self.get_dygraph_output() static_res = self.get_static_output() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph is {}\n static_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) # 1. 
test Apis that inherit from layers.Layer @@ -252,9 +250,7 @@ class TestDygraphBasicApi(unittest.TestCase): def test_transformed_static_result(self): dygraph_res = self.get_dygraph_output() static_res = self.get_static_output() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestDygraphBasicApi_BilinearTensorProduct(TestDygraphBasicApi): @@ -419,9 +415,7 @@ class TestDygraphBasicApi_CosineDecay(unittest.TestCase): def test_transformed_static_result(self): dygraph_res = self.get_dygraph_output() static_res = self.get_static_output() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestDygraphBasicApi_ExponentialDecay(TestDygraphBasicApi_CosineDecay): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py index f26ed2a6823..1929df30fe3 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bert.py @@ -181,12 +181,8 @@ class TestBert(unittest.TestCase): self.data_reader) dygraph_loss, dygraph_ppl = self.train_dygraph(self.bert_config, self.data_reader) - self.assertTrue(np.allclose(static_loss, dygraph_loss), - msg="static_loss: {} \n dygraph_loss: {}".format( - static_loss, dygraph_loss)) - self.assertTrue(np.allclose(static_ppl, dygraph_ppl), - msg="static_ppl: {} \n dygraph_ppl: {}".format( - static_ppl, dygraph_ppl)) + np.testing.assert_allclose(static_loss, dygraph_loss, rtol=1e-05) + np.testing.assert_allclose(static_ppl, dygraph_ppl, rtol=1e-05) self.verify_predict() @@ -200,19 +196,25 @@ class TestBert(unittest.TestCase): for dy_res, st_res, dy_jit_res, predictor_res in zip( dygraph_pred_res, static_pred_res, dygraph_jit_pred_res, predictor_pred_res): - self.assertTrue( - np.allclose(st_res, dy_res), - "dygraph_res: {},\n static_res: {}".format( + np.testing.assert_allclose( + st_res, + dy_res, + rtol=1e-05, + err_msg='dygraph_res: {},\n static_res: {}'.format( dy_res[~np.isclose(st_res, dy_res)], st_res[~np.isclose(st_res, dy_res)])) - self.assertTrue( - np.allclose(st_res, dy_jit_res), - "dygraph_jit_res: {},\n static_res: {}".format( + np.testing.assert_allclose( + st_res, + dy_jit_res, + rtol=1e-05, + err_msg='dygraph_jit_res: {},\n static_res: {}'.format( dy_jit_res[~np.isclose(st_res, dy_jit_res)], st_res[~np.isclose(st_res, dy_jit_res)])) - self.assertTrue( - np.allclose(st_res, predictor_res), - "dygraph_jit_res: {},\n static_res: {}".format( + np.testing.assert_allclose( + st_res, + predictor_res, + rtol=1e-05, + err_msg='dygraph_jit_res: {},\n static_res: {}'.format( predictor_res[~np.isclose(st_res, predictor_res)], st_res[~np.isclose(st_res, predictor_res)])) break diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py index f240fb9e5c1..e54c6274502 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py @@ -705,11 +705,14 @@ class TestTrain(unittest.TestCase): static_res = self.train_bmn(self.args, self.place, to_static=True) dygraph_res = self.train_bmn(self.args, self.place, to_static=False) - 
self.assertTrue( - np.allclose(dygraph_res, static_res), - "dygraph_res: {},\n static_res: {}".format( + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph_res: {},\n static_res: {}'.format( dygraph_res[~np.isclose(dygraph_res, static_res)], - static_res[~np.isclose(dygraph_res, static_res)])) + static_res[~np.isclose(dygraph_res, static_res)]), + atol=1e-8) # Prediction needs trained models, so put `test_predict` at last of `test_train` self.verify_predict() @@ -728,21 +731,30 @@ class TestTrain(unittest.TestCase): for dy_res, st_res, dy_jit_res, predictor_res in zip( dygraph_pred_res, static_pred_res, dygraph_jit_pred_res, predictor_pred_res): - self.assertTrue( - np.allclose(st_res, dy_res), - "dygraph_res: {},\n static_res: {}".format( + np.testing.assert_allclose( + st_res, + dy_res, + rtol=1e-05, + err_msg='dygraph_res: {},\n static_res: {}'.format( dy_res[~np.isclose(st_res, dy_res)], - st_res[~np.isclose(st_res, dy_res)])) - self.assertTrue( - np.allclose(st_res, dy_jit_res), - "dygraph_jit_res: {},\n static_res: {}".format( + st_res[~np.isclose(st_res, dy_res)]), + atol=1e-8) + np.testing.assert_allclose( + st_res, + dy_jit_res, + rtol=1e-05, + err_msg='dygraph_jit_res: {},\n static_res: {}'.format( dy_jit_res[~np.isclose(st_res, dy_jit_res)], - st_res[~np.isclose(st_res, dy_jit_res)])) - self.assertTrue( - np.allclose(st_res, predictor_res), - "dygraph_jit_res: {},\n static_res: {}".format( + st_res[~np.isclose(st_res, dy_jit_res)]), + atol=1e-8) + np.testing.assert_allclose( + st_res, + predictor_res, + rtol=1e-05, + err_msg='dygraph_jit_res: {},\n static_res: {}'.format( predictor_res[~np.isclose(st_res, predictor_res)], - st_res[~np.isclose(st_res, predictor_res)])) + st_res[~np.isclose(st_res, predictor_res)]), + atol=1e-8) break def predict_dygraph(self, data): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py index 9edff1859e4..90a31bf5772 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_break_continue.py @@ -230,9 +230,12 @@ class TestContinueInFor(unittest.TestCase): def test_transformed_static_result(self): static_res = self.run_static_mode() dygraph_res = self.run_dygraph_mode() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph res is {}\nstatic_res is {}'.format( + dygraph_res, static_res)) class TestContinueInForAtEnd(TestContinueInFor): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py index 68e725d7fc5..4850d677e4f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cache_program.py @@ -54,9 +54,11 @@ class TestCacheProgram(unittest.TestCase): prev_out, (tuple, list)) else prev_out.numpy() cur_out_numpy = cur_out[0].numpy() if isinstance( cur_out, (tuple, list)) else cur_out.numpy() - self.assertTrue( - np.allclose(prev_out_numpy, cur_out_numpy), - msg= + np.testing.assert_allclose( + prev_out_numpy, + cur_out_numpy, + rtol=1e-05, + err_msg= 'Output in previous batch is {}\n Output in current batch is \n{}' 
.format(prev_out_numpy, cur_out_numpy)) self.assertEqual(prev_ops, cur_ops) @@ -106,9 +108,12 @@ class TestCacheProgramWithOptimizer(unittest.TestCase): def test_with_optimizer(self): dygraph_loss = self.train_dygraph() static_loss = self.train_static() - self.assertTrue(np.allclose(dygraph_loss, static_loss), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose( + dygraph_loss, + static_loss, + rtol=1e-05, + err_msg='dygraph is {}\n static_res is \n{}'.format( + dygraph_loss, static_loss)) def simple_func(x): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py index da67b08287c..5c83c74cfc7 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cast.py @@ -91,9 +91,11 @@ class TestCastBase(unittest.TestCase): msg='The target dtype is {}, but the casted dtype is {}.'.format( self.cast_dtype, res.dtype)) ref_val = self.input.astype(self.cast_dtype) - self.assertTrue( - np.allclose(res, ref_val), - msg='The casted value is {}.\nThe correct value is {}.'.format( + np.testing.assert_allclose( + res, + ref_val, + rtol=1e-05, + err_msg='The casted value is {}.\nThe correct value is {}.'.format( res, ref_val)) @@ -149,9 +151,11 @@ class TestMixCast(TestCastBase): self.cast_dtype, res.dtype)) ref_val = self.input.astype(self.cast_int).astype( self.cast_float).astype(self.cast_bool).astype(self.cast_dtype) - self.assertTrue( - np.allclose(res, ref_val), - msg='The casted value is {}.\nThe correct value is {}.'.format( + np.testing.assert_allclose( + res, + ref_val, + rtol=1e-05, + err_msg='The casted value is {}.\nThe correct value is {}.'.format( res, ref_val)) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py index 6ed32e49775..73f0262daa1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_container.py @@ -96,9 +96,11 @@ class TestSequential(unittest.TestCase): out = self.net(x) if to_static: load_out = self._test_load(self.net, x) - self.assertTrue(np.allclose(load_out, out), - msg='load_out is {}\st_out is {}'.format( - load_out, out)) + np.testing.assert_allclose( + load_out, + out, + rtol=1e-05, + err_msg='load_out is {}\\st_out is {}'.format(load_out, out)) return out @@ -106,9 +108,12 @@ class TestSequential(unittest.TestCase): paddle.jit.set_code_level(100) dy_out = self._run(to_static=False) st_out = self._run(to_static=True) - self.assertTrue(np.allclose(dy_out, st_out), - msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out)) + np.testing.assert_allclose( + dy_out, + st_out, + rtol=1e-05, + err_msg='dygraph_res is {}\nstatic_res is {}'.format( + dy_out, st_out)) def _test_load(self, net, x): paddle.jit.save(net, self.model_path) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py index 3c1f31d0638..b3cdde63639 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py @@ -91,9 +91,12 @@ class TestRecursiveCall1(unittest.TestCase): def test_transformed_static_result(self): static_res = self.get_static_output() 
dygraph_res = self.get_dygraph_output() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph res is {}\nstatic_res is {}'.format( + dygraph_res, static_res)) lambda_fun = lambda x: x @@ -176,9 +179,7 @@ class TestRecursiveCall2(unittest.TestCase): def test_transformed_static_result(self): dygraph_res = self.get_dygraph_output() static_res = self.get_static_output() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestThirdPartyLibrary(TestRecursiveCall2): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py index 7fc7002aca3..6d7bd58ee8f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cpu_cuda_to_tensor.py @@ -46,9 +46,9 @@ class TestToTensor(unittest.TestCase): x = paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - self.assertTrue( - np.allclose( - paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4]))) + np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), + np.array([1, 2, 3, 4]), + rtol=1e-05) class TestToTensor1(unittest.TestCase): @@ -66,9 +66,9 @@ class TestToTensor1(unittest.TestCase): x = paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - self.assertTrue( - np.allclose( - paddle.jit.to_static(func)(x).numpy(), np.array([1, 2, 3, 4]))) + np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), + np.array([1, 2, 3, 4]), + rtol=1e-05) class TestToTensor2(unittest.TestCase): @@ -81,10 +81,9 @@ class TestToTensor2(unittest.TestCase): x = paddle.to_tensor([3]) print(paddle.jit.to_static(func).code) - self.assertTrue( - np.allclose( - paddle.jit.to_static(func)(x).numpy(), - np.array([[1], [2], [3], [4]]))) + np.testing.assert_allclose(paddle.jit.to_static(func)(x).numpy(), + np.array([[1], [2], [3], [4]]), + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py index 46c847938c6..947cfa35d43 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_declarative.py @@ -127,7 +127,7 @@ class TestInputSpec(unittest.TestCase): jit.save(net, self.model_path) infer_net = fluid.dygraph.jit.load(self.model_path) pred = infer_net(x) - self.assertTrue(np.allclose(out.numpy(), pred.numpy())) + np.testing.assert_allclose(out.numpy(), pred.numpy(), rtol=1e-05) # 3. 
we can decorate any method x_2 = to_variable(np.ones([4, 20]).astype('float32')) @@ -218,25 +218,33 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): # [16, 10] + [10] (varbase) out_1 = foo(to_variable(x_data), to_variable(y_data)) - self.assertTrue(np.allclose(x_data + y_data, out_1.numpy())) + np.testing.assert_allclose(x_data + y_data, + out_1.numpy(), + rtol=1e-05) self.assertTrue(len(foo.program_cache) == 1) self.assertTrue(len(foo.program_cache.concrete_programs()) == 1) first_program = foo.program_cache.last() # [16, 10] + [10] (numpy) out_2 = foo(to_variable(x_data), y_data) - self.assertTrue(np.allclose(x_data + y_data, out_2.numpy())) + np.testing.assert_allclose(x_data + y_data, + out_2.numpy(), + rtol=1e-05) self.assertTrue(len(foo.program_cache) == 1) # [16, 10] + [10] (numpy) out_3 = foo(to_variable(x_data), z_data) - self.assertTrue(np.allclose(x_data + z_data, out_3.numpy())) + np.testing.assert_allclose(x_data + z_data, + out_3.numpy(), + rtol=1e-05) # hit cache program self.assertTrue(len(foo.program_cache) == 1) # [16, 10] + [10] (numpy) with other different arguments (c=3) out_4 = foo(to_variable(x_data), z_data, 3) - self.assertTrue(np.allclose(x_data + z_data, out_4.numpy())) + np.testing.assert_allclose(x_data + z_data, + out_4.numpy(), + rtol=1e-05) # create a new program self.assertTrue(len(foo.program_cache) == 2) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py index e8999acce0e..78926528dc5 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py @@ -195,9 +195,12 @@ class TestDictPop(unittest.TestCase): def test_transformed_result(self): dygraph_res = self._run_dygraph() static_res = self._run_static() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph result is {}\nstatic result is {}'.format( + dygraph_res, static_res)) class TestDictPop2(TestDictPop): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py index d5c83235747..201da70b017 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_drop_path.py @@ -48,9 +48,9 @@ class TestTrainEval(unittest.TestCase): eval_out = x.numpy() train_out = x.numpy() * 2 self.model.train() - self.assertTrue(np.allclose(self.model(x).numpy(), train_out)) + np.testing.assert_allclose(self.model(x).numpy(), train_out, rtol=1e-05) self.model.eval() - self.assertTrue(np.allclose(self.model(x).numpy(), eval_out)) + np.testing.assert_allclose(self.model(x).numpy(), eval_out, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py index d3654260d8d..3e24fde92f8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_fetch_feed.py @@ -92,9 +92,12 @@ class TestPool2D(unittest.TestCase): dygraph_res = self.train_dygraph() static_res = self.train_static() - self.assertTrue(np.allclose(dygraph_res, static_res), - 
msg='dygraph_res is {}\n static_res is \n{}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph_res is {}\n static_res is \n{}'.format( + dygraph_res, static_res)) class TestLinear(TestPool2D): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py index 4c69849ccbd..df90deb4913 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_for_enumerate.py @@ -385,7 +385,7 @@ class TestTransform(TestTransformBase): st_outs = (st_outs, ) for x, y in zip(dy_outs, st_outs): - self.assertTrue(np.allclose(x.numpy(), y.numpy())) + np.testing.assert_allclose(x.numpy(), y.numpy(), rtol=1e-05) class TestTransformForOriginalList(TestTransform): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py index 108c6228499..aa3f916f311 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_full_name_usage.py @@ -65,11 +65,15 @@ class TestFullNameDecorator(unittest.TestCase): x = np.ones([1, 2]).astype("float32") answer = np.zeros([1, 2]).astype("float32") with fluid.dygraph.guard(): - self.assertTrue( - np.allclose(dygraph_decorated_func(x).numpy(), answer)) - self.assertTrue(np.allclose(jit_decorated_func(x).numpy(), answer)) - self.assertTrue( - np.allclose(decorated_call_decorated(x).numpy(), answer)) + np.testing.assert_allclose(dygraph_decorated_func(x).numpy(), + answer, + rtol=1e-05) + np.testing.assert_allclose(jit_decorated_func(x).numpy(), + answer, + rtol=1e-05) + np.testing.assert_allclose(decorated_call_decorated(x).numpy(), + answer, + rtol=1e-05) with self.assertRaises(NotImplementedError): DoubleDecorated().double_decorated_func1(x) with self.assertRaises(NotImplementedError): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py index f7eccf1f9e7..ef86632eafb 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_grad.py @@ -88,7 +88,7 @@ class TestGrad(unittest.TestCase): def test_forward(self): dygraph_res = self._run(self.func, to_static=False) static_res = self._run(self.func, to_static=True) - self.assertTrue(np.allclose(static_res, dygraph_res)) + np.testing.assert_allclose(static_res, dygraph_res, rtol=1e-05) class TestGradLinear(TestGrad): @@ -116,7 +116,7 @@ class TestGradLinear(TestGrad): origin_res = self.func(self.x).numpy() load_res = load_func(self.x).numpy() - self.assertTrue(np.allclose(origin_res, load_res)) + np.testing.assert_allclose(origin_res, load_res, rtol=1e-05) def test_save_train_program(self): grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0) @@ -136,7 +136,7 @@ class TestGradLinear(TestGrad): origin_res = self.func(self.x).numpy() load_res = load_func(self.x).numpy() - self.assertTrue(np.allclose(origin_res, load_res)) + np.testing.assert_allclose(origin_res, load_res, rtol=1e-05) class TestNoGradLinear(TestGradLinear): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py 
b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py index 3b7cca31ce9..37ed8b6c88d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_gradient_aggregation.py @@ -50,10 +50,9 @@ class TestGradientAggregationInDy2Static(unittest.TestCase): return net.linear1.weight.grad inp = paddle.to_tensor(np.random.randn(10, )).astype("float32") - self.assertTrue( - np.allclose( - simplenet_grad(inp, True).numpy(), - simplenet_grad(inp, False).numpy())) + np.testing.assert_allclose(simplenet_grad(inp, True).numpy(), + simplenet_grad(inp, False).numpy(), + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py index acfd2910269..63853924253 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py @@ -508,7 +508,9 @@ class TestDy2StIfElseBackward(unittest.TestCase): net.train() out = net(a, b, c) out.backward() - self.assertTrue(np.allclose((b + net.param).numpy(), out.numpy())) + np.testing.assert_allclose((b + net.param).numpy(), + out.numpy(), + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py index 95432b58a33..dc3b4dd74a6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py @@ -108,8 +108,11 @@ class TestIsinstance(unittest.TestCase): def _test_model(self, model): st_out = train(model, to_static=True) dy_out = train(model, to_static=False) - self.assertTrue(np.allclose(dy_out, st_out), - msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out)) + np.testing.assert_allclose(dy_out, + st_out, + rtol=1e-05, + err_msg='dy_out:\n {}\n st_out:\n{}'.format( + dy_out, st_out)) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py index 0c41621f6e7..5ee2238d2d1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py @@ -549,9 +549,12 @@ class TestLACModel(unittest.TestCase): def test_train(self): st_out = self.train(self.args, to_static=True) dy_out = self.train(self.args, to_static=False) - self.assertTrue(np.allclose(dy_out, st_out), - msg="dygraph output:\n{},\nstatic output:\n {}.".format( - dy_out, st_out)) + np.testing.assert_allclose( + dy_out, + st_out, + rtol=1e-05, + err_msg='dygraph output:\n{},\nstatic output:\n {}.'.format( + dy_out, st_out)) # Prediction needs trained models, so put `test_predict` at last of `test_train` # self.verify_predict() @@ -564,12 +567,8 @@ class TestLACModel(unittest.TestCase): dy_pre = self.predict_dygraph(batch) st_pre = self.predict_static(batch) dy_jit_pre = self.predict_dygraph_jit(batch) - self.assertTrue(np.allclose(dy_pre, st_pre), - msg="dy_pre:\n {}\n, st_pre: \n{}.".format( - dy_pre, st_pre)) - self.assertTrue(np.allclose(dy_jit_pre, st_pre), - msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format( - dy_jit_pre, st_pre)) + np.testing.assert_allclose(dy_pre, st_pre, rtol=1e-05) + np.testing.assert_allclose(dy_jit_pre, st_pre, 
rtol=1e-05) def predict_dygraph(self, batch): words, targets, length = batch diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py index b06b01a46fe..b2a23b7d86f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py @@ -85,12 +85,17 @@ class TestNestLayerHook(unittest.TestCase): st_out = self.train_net(to_static=True) load_out = self.load_train() print(st_out, dy_out, load_out) - self.assertTrue(np.allclose(st_out, dy_out), - msg='dygraph_res is {}\nstatic_res is {}'.format( - dy_out, st_out)) - self.assertTrue(np.allclose(st_out, load_out), - msg='load_out is {}\nstatic_res is {}'.format( - load_out, st_out)) + np.testing.assert_allclose( + st_out, + dy_out, + rtol=1e-05, + err_msg='dygraph_res is {}\nstatic_res is {}'.format( + dy_out, st_out)) + np.testing.assert_allclose( + st_out, + load_out, + rtol=1e-05, + err_msg='load_out is {}\nstatic_res is {}'.format(load_out, st_out)) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py index 386c3a1bd7b..32dae1c4875 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_len.py @@ -68,7 +68,7 @@ class TestLen(unittest.TestCase): def test_len(self): dygraph_res = self._run(to_static=False) static_res = self._run(to_static=True) - self.assertTrue(np.allclose(dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestLenWithTensorArray(TestLen): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py index 1d64e7b8184..0122af76772 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_list.py @@ -249,9 +249,11 @@ class TestListWithoutControlFlow(unittest.TestCase): self.assertEqual(len(static_res_list), len(dygraph_res_list)) for stat_res, dy_res in zip(static_res_list, dygraph_res_list): - self.assertTrue( - np.allclose(stat_res, dy_res), - msg='dygraph_res is {}\nstatic_res is {}'.format( + np.testing.assert_allclose( + stat_res, + dy_res, + rtol=1e-05, + err_msg='dygraph_res is {}\nstatic_res is {}'.format( stat_res, dy_res)) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py index 0a510eb81b1..6130541faae 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_logical.py @@ -204,9 +204,12 @@ class TestLogicalNot(TestLogicalBase): def test_transformed_result(self): dygraph_res = self._run_dygraph() static_res = self._run_static() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph result is {}\nstatic_result is {}'.format( + dygraph_res, static_res)) class TestLogicalNot2(TestLogicalBase): @@ -217,9 +220,12 @@ class TestLogicalNot2(TestLogicalBase): def test_transformed_result(self): dygraph_res = self._run_dygraph() static_res = 
self._run_static() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph result is {}\nstatic_result is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose( + dygraph_res, + static_res, + rtol=1e-05, + err_msg='dygraph result is {}\nstatic_result is {}'.format( + dygraph_res, static_res)) class TestLogicalAnd(TestLogicalNot): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py index ff3e0da6fea..612e68109de 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py @@ -327,7 +327,7 @@ class TestTransformWhileLoop(unittest.TestCase): static_numpy = self._run_static() dygraph_numpy = self._run_dygraph() print(static_numpy, dygraph_numpy) - self.assertTrue(np.allclose(dygraph_numpy, static_numpy)) + np.testing.assert_allclose(dygraph_numpy, static_numpy, rtol=1e-05) class TestTransformWhileLoopWithoutTensor(TestTransformWhileLoop): @@ -404,7 +404,9 @@ class TestTransformForLoop(unittest.TestCase): return ret.numpy() def test_ast_to_func(self): - self.assertTrue(np.allclose(self._run_dygraph(), self._run_static())) + np.testing.assert_allclose(self._run_dygraph(), + self._run_static(), + rtol=1e-05) class TestTransformForLoop2(TestTransformForLoop): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py index 60175851561..d47445c05e9 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lstm.py @@ -69,9 +69,7 @@ class TestLstm(unittest.TestCase): def test_lstm_to_static(self): dygraph_out = self.run_lstm(to_static=False) static_out = self.run_lstm(to_static=True) - self.assertTrue(np.allclose(dygraph_out, static_out), - msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, static_out)) + np.testing.assert_allclose(dygraph_out, static_out, rtol=1e-05) def test_save_in_eval(self, with_training=True): paddle.jit.ProgramTranslator().enable(True) @@ -98,15 +96,21 @@ class TestLstm(unittest.TestCase): load_net = paddle.jit.load(model_path) static_out = load_net(x) - self.assertTrue(np.allclose(dygraph_out.numpy(), static_out.numpy()), - msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, static_out)) + np.testing.assert_allclose( + dygraph_out.numpy(), + static_out.numpy(), + rtol=1e-05, + err_msg='dygraph_out is {}\n static_out is \n{}'.format( + dygraph_out, static_out)) # switch back into train mode. 
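A note on the mechanics of this conversion (an aside, not part of the patch itself): `self.assertTrue(np.allclose(...))` collapses the comparison to a bare bool, which is why the tests above hand-build `msg` strings to get any diagnostics at all, while `np.testing.assert_allclose` reports mismatch statistics on its own. A minimal sketch, assuming only NumPy:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 3.1])

print(np.allclose(a, b))  # False -- no hint of where or by how much
try:
    np.testing.assert_allclose(a, b, rtol=1e-05)
except AssertionError as err:
    # The raised message includes "Mismatched elements: 1 / 3 (33.3%)"
    # plus the max absolute and max relative differences.
    print(err)

This is why many hand-written `msg`/`err_msg` arguments are simply dropped in the hunks above and below.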
net.train() train_out = net(x) - self.assertTrue(np.allclose(dygraph_out.numpy(), train_out.numpy()), - msg='dygraph_out is {}\n static_out is \n{}'.format( - dygraph_out, train_out)) + np.testing.assert_allclose( + dygraph_out.numpy(), + train_out.numpy(), + rtol=1e-05, + err_msg='dygraph_out is {}\n train_out is \n{}'.format( + dygraph_out, train_out)) def test_save_without_training(self): self.test_save_in_eval(with_training=False) @@ -160,9 +164,12 @@ class TestSaveInEvalMode(unittest.TestCase): eval_out = net(x) infer_out = load_net(x) - self.assertTrue(np.allclose(eval_out.numpy(), infer_out.numpy()), - msg='eval_out is {}\n infer_out is \n{}'.format( - eval_out, infer_out)) + np.testing.assert_allclose( + eval_out.numpy(), + infer_out.numpy(), + rtol=1e-05, + err_msg='eval_out is {}\n infer_out is \n{}'.format( + eval_out, infer_out)) class TestEvalAfterSave(unittest.TestCase): @@ -190,11 +197,11 @@ class TestEvalAfterSave(unittest.TestCase): paddle.jit.save(net, model_path, input_spec=[x]) load_net = paddle.jit.load(model_path) load_out = load_net(x) - self.assertTrue(np.allclose(dy_out.numpy(), load_out.numpy())) + np.testing.assert_allclose(dy_out.numpy(), load_out.numpy(), rtol=1e-05) # eval net.eval() out = net(x) - self.assertTrue(np.allclose(dy_out.numpy(), out.numpy())) + np.testing.assert_allclose(dy_out.numpy(), out.numpy(), rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py index ad4d64d4b9c..d682ef29296 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py @@ -38,9 +38,13 @@ class TestAMP(TestMNIST): # NOTE(Aurelius84): In static AMP training, there is a grep_list but # dygraph AMP doesn't have one. This makes the number of cast ops # differ, which gives the loss a small diff. - self.assertTrue(np.allclose(dygraph_loss, static_loss, atol=1e-3), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose( + dygraph_loss, + static_loss, + rtol=1e-05, + atol=0.001, + err_msg='dygraph is {}\n static_res is \n{}'.format( + dygraph_loss, static_loss)) def train(self, to_static=False): paddle.seed(SEED) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py index d54231d2c46..bc0742a7898 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_pure_fp16.py @@ -38,9 +38,13 @@ class TestPureFP16(TestMNIST): dygraph_loss = self.train_dygraph() static_loss = self.train_static() # NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here.
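Why the AMP and pure-fp16 hunks here pin `atol` on top of `rtol` (a sketch with illustrative values, not taken from the test suite): `np.testing.assert_allclose` checks `|actual - desired| <= atol + rtol * |desired|` elementwise, so for losses near zero the absolute term dominates. Its defaults are also stricter than `np.allclose`'s (`rtol=1e-07, atol=0` versus `rtol=1e-05, atol=1e-08`), which is why this migration writes `rtol=1e-05` explicitly everywhere.

import numpy as np

desired = np.array([1.25e0, 1.0e-4], dtype=np.float32)
actual = desired + np.array([1.0e-5, 5.0e-4], dtype=np.float32)  # fp16-sized noise

# Passes: for the near-zero element, 5e-4 <= 0.001 + 1e-05 * 1e-4.
np.testing.assert_allclose(actual, desired, rtol=1e-05, atol=0.001)

# The same data fails under the assert_allclose defaults (rtol=1e-07, atol=0):
# np.testing.assert_allclose(actual, desired)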
- self.assertTrue(np.allclose(dygraph_loss, static_loss, atol=1e-3), - msg='dygraph is {}\n static_res is \n{}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose( + dygraph_loss, + static_loss, + rtol=1e-05, + atol=0.001, + err_msg='dygraph is {}\n static_res is \n{}'.format( + dygraph_loss, static_loss)) def train(self, to_static=False): np.random.seed(SEED) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py index 1a4eca95920..8247787b515 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_param_guard.py @@ -80,16 +80,12 @@ class TestParameterList(unittest.TestCase): def test_parameter_list(self): static_loss = self.train(False, to_static=True) dygraph_loss = self.train(False, to_static=False) - self.assertTrue(np.allclose(dygraph_loss, static_loss), - msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05) def test_parameter_list_iter(self): static_loss = self.train(True, to_static=True) dygraph_loss = self.train(True, to_static=False) - self.assertTrue(np.allclose(dygraph_loss, static_loss), - msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05) class NetWithRawParamList(paddle.nn.Layer): @@ -142,9 +138,7 @@ class TestRawParameterList(unittest.TestCase): def test_parameter_list(self): static_loss = self.train(to_static=True) dygraph_loss = self.train(to_static=False) - self.assertTrue(np.allclose(dygraph_loss, static_loss), - msg='dygraph result is {}\nstatic result is {}'.format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05) class NetWithSubLayerParamList(paddle.nn.Layer): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py index 560ae6b4ade..4a62ab82374 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_partial_program.py @@ -89,7 +89,7 @@ class TestWithNestedInput(unittest.TestCase): def test_nest(self): dygraph_res = self._run(to_static=False) static_res = self._run(to_static=True) - self.assertTrue(np.allclose(dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestWithNestedOutput(unittest.TestCase): @@ -123,7 +123,9 @@ class TestWithNestedOutput(unittest.TestCase): for dy_var, st_var in zip(dygraph_res, static_res): if isinstance(dy_var, (fluid.core.VarBase, fluid.core.eager.Tensor)): - self.assertTrue(np.allclose(dy_var.numpy(), st_var.numpy())) + np.testing.assert_allclose(dy_var.numpy(), + st_var.numpy(), + rtol=1e-05) else: self.assertTrue(dy_var, st_var) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py index 27debe00af1..4c36707607f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_program_translator.py @@ -234,10 +234,10 @@ class TestEnableDeclarative(unittest.TestCase): with fluid.dygraph.guard(): 
dygraph_output = self.program_translator.get_output( simple_func, self.x, self.weight) - self.assertTrue( - np.allclose(static_output.numpy(), - dygraph_output.numpy(), - atol=1e-4)) + np.testing.assert_allclose(static_output.numpy(), + dygraph_output.numpy(), + rtol=1e-05, + atol=1e-4) def test_enable_disable_get_func(self): @@ -290,10 +290,10 @@ class TestEnableDeclarative(unittest.TestCase): self.program_translator.enable(False) with fluid.dygraph.guard(): dygraph_output = decorated_simple_func(self.x, self.weight) - self.assertTrue( - np.allclose(static_output.numpy(), - dygraph_output.numpy(), - atol=1e-4)) + np.testing.assert_allclose(static_output.numpy(), + dygraph_output.numpy(), + rtol=1e-05, + atol=1e-4) class Net(fluid.dygraph.layers.Layer): @@ -381,13 +381,13 @@ class TestIfElseEarlyReturn(unittest.TestCase): answer = np.zeros([2, 2]) + 1 static_func = paddle.jit.to_static(dyfunc_with_if_else_early_return1) out = static_func() - self.assertTrue(np.allclose(answer, out[0].numpy())) + np.testing.assert_allclose(answer, out[0].numpy(), rtol=1e-05) def test_ifelse_early_return2(self): answer = np.zeros([2, 2]) + 3 static_func = paddle.jit.to_static(dyfunc_with_if_else_early_return2) out = static_func() - self.assertTrue(np.allclose(answer, out[0].numpy())) + np.testing.assert_allclose(answer, out[0].numpy(), rtol=1e-05) class TestRemoveCommentInDy2St(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py index 75f17e22e46..4adc4bde123 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py @@ -305,15 +305,9 @@ class TestPtb(unittest.TestCase): loss_1, hidden_1, cell_1 = train_static(self.place) loss_2, hidden_2, cell_2 = train_dygraph(self.place) - self.assertTrue(np.allclose(loss_1, loss_2), - msg="static loss: {} \ndygraph loss: {}".format( - loss_1, loss_2)) - self.assertTrue(np.allclose(hidden_1, hidden_2), - msg="static hidden: {} \ndygraph acc1: {}".format( - hidden_1, hidden_2)) - self.assertTrue(np.allclose(cell_1, cell_2), - msg="static cell: {} \ndygraph cell: {}".format( - cell_1, cell_2)) + np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05) + np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05) + np.testing.assert_allclose(cell_1, cell_2, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py index 5d0d488915d..f98556174c6 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py @@ -308,15 +308,9 @@ class TestPtb(unittest.TestCase): loss_1, hidden_1, cell_1 = train_static(self.place) loss_2, hidden_2, cell_2 = train_dygraph(self.place) - self.assertTrue(np.allclose(loss_1, loss_2), - msg="static loss: {} \ndygraph loss: {}".format( - loss_1, loss_2)) - self.assertTrue(np.allclose(hidden_1, hidden_2), - msg="static hidden: {} \ndygraph acc1: {}".format( - hidden_1, hidden_2)) - self.assertTrue(np.allclose(cell_1, cell_2), - msg="static cell: {} \ndygraph cell: {}".format( - cell_1, cell_2)) + np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05) + np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05) + np.testing.assert_allclose(cell_1, cell_2, rtol=1e-05) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py index cc373f07e99..99361fa3949 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_reinforcement_learning.py @@ -209,9 +209,7 @@ class TestDeclarative(unittest.TestCase): def test_train(self): st_out = train(self.args, self.place, to_static=True) dy_out = train(self.args, self.place, to_static=False) - self.assertTrue(np.allclose(st_out, dy_out), - msg="dy_out:\n {}\n st_out:\n{}\n".format( - dy_out, st_out)) + np.testing.assert_allclose(st_out, dy_out, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py index 7f78788e596..2905bd07439 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_return.py @@ -253,15 +253,12 @@ class TestReturnBase(unittest.TestCase): self.assertTrue(isinstance(static_res, tuple)) self.assertEqual(len(dygraph_res), len(static_res)) for i in range(len(dygraph_res)): - self.assertTrue( - np.allclose(dygraph_res[i], static_res[i]), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res[i], static_res[i])) + np.testing.assert_allclose(dygraph_res[i], + static_res[i], + rtol=1e-05) elif isinstance(dygraph_res, np.ndarray): - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) else: self.assertEqual(dygraph_res, static_res) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py index 9549844f59c..fec74bea660 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py @@ -115,7 +115,7 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase): # Check the correctness of the inference infer_out = self.load_and_run_inference(infer_model_dir, model_filename, params_filename, inputs) - self.assertTrue(np.allclose(gt_out, infer_out)) + np.testing.assert_allclose(gt_out, infer_out, rtol=1e-05) def load_and_run_inference(self, model_path, model_filename, params_filename, inputs): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py index cc75dcd949d..ef76fe6f7c5 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_load.py @@ -83,8 +83,12 @@ class TestDyToStaticSaveLoad(unittest.TestCase): program_translator.enable(False) dygraph_out, dygraph_loss = dygraph_net(x) - self.assertTrue(np.allclose(dygraph_out.numpy(), static_out.numpy())) - self.assertTrue(np.allclose(dygraph_loss.numpy(), static_loss.numpy())) + np.testing.assert_allclose(dygraph_out.numpy(), + static_out.numpy(), + rtol=1e-05) + np.testing.assert_allclose(dygraph_loss.numpy(), + static_loss.numpy(), + rtol=1e-05) if __name__ == '__main__': diff --git 
a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py index 16e51784a07..dffc0340a6d 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py @@ -473,12 +473,17 @@ class TestSeResnet(unittest.TestCase): st_pre = self.predict_static(image) dy_jit_pre = self.predict_dygraph_jit(image) predictor_pre = self.predict_analysis_inference(image) - self.assertTrue(np.allclose(dy_pre, st_pre), - msg="dy_pre:\n {}\n, st_pre: \n{}.".format( - dy_pre, st_pre)) - self.assertTrue(np.allclose(dy_jit_pre, st_pre), - msg="dy_jit_pre:\n {}\n, st_pre: \n{}.".format( - dy_jit_pre, st_pre)) + np.testing.assert_allclose( + dy_pre, + st_pre, + rtol=1e-05, + err_msg='dy_pre:\n {}\n, st_pre: \n{}.'.format(dy_pre, st_pre)) + np.testing.assert_allclose( + dy_jit_pre, + st_pre, + rtol=1e-05, + err_msg='dy_jit_pre:\n {}\n, st_pre: \n{}.'.format( + dy_jit_pre, st_pre)) flat_st_pre = st_pre.flatten() flat_predictor_pre = np.array(predictor_pre).flatten() @@ -497,18 +502,26 @@ class TestSeResnet(unittest.TestCase): pred_2, loss_2, acc1_2, acc5_2 = self.train(self.train_reader, to_static=True) - self.assertTrue(np.allclose(pred_1, pred_2), - msg="static pred: {} \ndygraph pred: {}".format( - pred_1, pred_2)) - self.assertTrue(np.allclose(loss_1, loss_2), - msg="static loss: {} \ndygraph loss: {}".format( - loss_1, loss_2)) - self.assertTrue(np.allclose(acc1_1, acc1_2), - msg="static acc1: {} \ndygraph acc1: {}".format( - acc1_1, acc1_2)) - self.assertTrue(np.allclose(acc5_1, acc5_2), - msg="static acc5: {} \ndygraph acc5: {}".format( - acc5_1, acc5_2)) + np.testing.assert_allclose( + pred_1, + pred_2, + rtol=1e-05, + err_msg='static pred: {} \ndygraph pred: {}'.format(pred_1, pred_2)) + np.testing.assert_allclose( + loss_1, + loss_2, + rtol=1e-05, + err_msg='static loss: {} \ndygraph loss: {}'.format(loss_1, loss_2)) + np.testing.assert_allclose( + acc1_1, + acc1_2, + rtol=1e-05, + err_msg='static acc1: {} \ndygraph acc1: {}'.format(acc1_1, acc1_2)) + np.testing.assert_allclose( + acc5_1, + acc5_2, + rtol=1e-05, + err_msg='static acc5: {} \ndygraph acc5: {}'.format(acc5_1, acc5_2)) self.verify_predict() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py index 719645aa2b5..f581f45ca59 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_sentiment.py @@ -353,9 +353,11 @@ class TestSentiment(unittest.TestCase): self.args.model_type = model_type st_out = train(self.args, True) dy_out = train(self.args, False) - self.assertTrue(np.allclose(dy_out, st_out), - msg="dy_out:\n {}\n st_out:\n {}".format( - dy_out, st_out)) + np.testing.assert_allclose(dy_out, + st_out, + rtol=1e-05, + err_msg='dy_out:\n {}\n st_out:\n {}'.format( + dy_out, st_out)) def test_train(self): model_types = ['cnn_net', 'bow_net', 'gru_net', 'bigru_net'] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py index 7e8390d5443..3a7a1dc1b0e 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py @@ -139,9 +139,7 @@ class TestSliceWithoutControlFlow(unittest.TestCase): def 
test_transformed_static_result(self): static_res = self.run_static_mode() dygraph_res = self.run_dygraph_mode() - self.assertTrue(np.allclose(dygraph_res, static_res), - msg='dygraph_res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) class TestSliceInIf(TestSliceWithoutControlFlow): @@ -283,11 +281,7 @@ class TestPaddleStridedSlice(unittest.TestCase): strides=stride2) array_slice = array[s2[0]:e2[0]:stride2[0], ::, s2[1]:e2[1]:stride2[1]] - np.testing.assert_array_equal( - sl.numpy(), - array_slice, - err_msg='paddle.strided_slice:\n {} \n numpy slice:\n{}'.format( - sl.numpy(), array_slice)) + np.testing.assert_array_equal(sl.numpy(), array_slice) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py index f535cf4c35d..7b587a77728 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py @@ -14,7 +14,7 @@ from __future__ import print_function -import numpy +import numpy as np import paddle import unittest @@ -37,9 +37,7 @@ class TestTensorClone(unittest.TestCase): def test_tensor_clone(self): dygraph_res = self._run(to_static=False) static_res = self._run(to_static=True) - self.assertTrue(numpy.allclose(dygraph_res, static_res), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) @paddle.jit.to_static diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py index 0d1dc69823a..14050e86805 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_shape.py @@ -14,7 +14,7 @@ from __future__ import print_function -import numpy +import numpy as np import unittest import paddle @@ -39,7 +39,7 @@ def dyfunc_tensor_shape_2(x): def dyfunc_tensor_shape_3(x): # Transform y.shape but run y.shape actually because y is not Tensor x = fluid.dygraph.to_variable(x) - y = numpy.ones(5) + y = np.ones(5) res = fluid.layers.reshape(x, shape=y.shape) return res @@ -200,7 +200,7 @@ def dyfunc_with_while_3(x): def dyfunc_with_while_4(x): x = paddle.to_tensor(x) - y = numpy.ones(5) + y = np.ones(5) y_shape_0 = y.shape[0] i = 1 @@ -235,7 +235,7 @@ def dyfunc_dict_assign_shape(): class TestTensorShapeBasic(unittest.TestCase): def setUp(self): - self.input = numpy.ones(5).astype("int32") + self.input = np.ones(5).astype("int32") self.place = fluid.CUDAPlace( 0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() self._set_input_spec() @@ -265,9 +265,7 @@ class TestTensorShapeBasic(unittest.TestCase): def test_transformed_static_result(self): static_res = self.get_static_output() dygraph_res = self.get_dygraph_output() - self.assertTrue(numpy.allclose(dygraph_res, static_res), - msg='dygraph res is {}\nstatic_res is {}'.format( - dygraph_res, static_res)) + np.testing.assert_allclose(dygraph_res, static_res, rtol=1e-05) def _set_expected_op_num(self): self.expected_op_num = 2 @@ -342,7 +340,7 @@ class TestTensorShapeBasic6(TestTensorShapeBasic): class TestTupleShape1(TestTensorShapeBasic): def init_test_func(self): - self.input = numpy.ones((5, 7)).astype("int32") + self.input = 
np.ones((5, 7)).astype("int32") self.input_spec = [ paddle.static.InputSpec(shape=[-1, -1], dtype="int32") ] @@ -357,7 +355,7 @@ class TestTupleShape1(TestTensorShapeBasic): class TestTupleShape2(TestTensorShapeBasic): def init_test_func(self): - self.input = numpy.ones((5, 7)).astype("int32") + self.input = np.ones((5, 7)).astype("int32") self.input_spec = [ paddle.static.InputSpec(shape=[-1, 7], dtype="int32") ] @@ -372,7 +370,7 @@ class TestTupleShape2(TestTensorShapeBasic): class TestTupleShape3(TestTensorShapeBasic): def init_test_func(self): - self.input = numpy.ones((5, 7)).astype("int32") + self.input = np.ones((5, 7)).astype("int32") self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")] self.dygraph_func = dyfunc_tuple_shape_3 @@ -385,7 +383,7 @@ class TestTupleShape3(TestTensorShapeBasic): class TestPaddleShapeApi(TestTensorShapeBasic): def init_test_func(self): - self.input = numpy.ones((5, 7)).astype("int32") + self.input = np.ones((5, 7)).astype("int32") self.input_spec = [paddle.static.InputSpec(shape=[5, 7], dtype="int32")] self.dygraph_func = dyfunc_paddle_shape_api @@ -597,7 +595,7 @@ class TestOpNumWithTensorShapeInWhile1(TestOpNumBasicWithTensorShape): class TestChangeShapeAfterAssign(TestTensorShapeBasic): def init_test_func(self): - self.input = numpy.ones((2, 3)).astype("int32") + self.input = np.ones((2, 3)).astype("int32") self.input_spec = [ paddle.static.InputSpec(shape=[-1, 3], dtype="int32") ] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py index 32bd9bc5d50..f7b16c06cb8 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_transformer.py @@ -395,19 +395,17 @@ class TestTransformer(unittest.TestCase): args, batch_generator = self.prepare(mode='train') static_avg_loss = train_static(args, batch_generator) dygraph_avg_loss = train_dygraph(args, batch_generator) - self.assertTrue(np.allclose(static_avg_loss, dygraph_avg_loss)) + np.testing.assert_allclose(static_avg_loss, + dygraph_avg_loss, + rtol=1e-05) def _test_predict(self): args, batch_generator = self.prepare(mode='test') static_seq_ids, static_scores = predict_static(args, batch_generator) dygraph_seq_ids, dygraph_scores = predict_dygraph(args, batch_generator) - self.assertTrue(np.allclose(static_seq_ids, static_seq_ids), - msg="static_seq_ids: {} \n dygraph_seq_ids: {}".format( - static_seq_ids, dygraph_seq_ids)) - self.assertTrue(np.allclose(static_scores, dygraph_scores), - msg="static_scores: {} \n dygraph_scores: {}".format( - static_scores, dygraph_scores)) + np.testing.assert_allclose(static_seq_ids, dygraph_seq_ids, rtol=1e-05) + np.testing.assert_allclose(static_scores, dygraph_scores, rtol=1e-05) def test_check_result(self): self._test_train() diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py index 15a1db65b94..b14722ac712 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py @@ -347,9 +347,7 @@ class TestTsm(unittest.TestCase): fake_data_reader = FakeDataReader("train", parse_config(args.config)) dygraph_loss = train(args, fake_data_reader, to_static=False) static_loss = train(args, fake_data_reader, to_static=True) - self.assertTrue(np.allclose(dygraph_loss, static_loss), -
msg="dygraph_loss: {} \nstatic_loss: {}".format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py index 66b154ee30a..cd2b7aa6e53 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_typing.py @@ -101,7 +101,7 @@ class TestTyping(unittest.TestCase): out = self.run_dy() load_net = self.save_and_load('tuple') load_out = load_net(self.x) - self.assertTrue(np.allclose(out, load_out)) + np.testing.assert_allclose(out, load_out, rtol=1e-05) class TestTypingTuple(TestTyping): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py index f510e2dca6f..05698657492 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_word2vec.py @@ -301,9 +301,7 @@ class TestWord2Vec(unittest.TestCase): def test_dygraph_static_same_loss(self): dygraph_loss = train(to_static=False) static_loss = train(to_static=True) - self.assertTrue(np.allclose(dygraph_loss, static_loss), - msg="dygraph_loss: {} \nstatic_loss: {}".format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py index ef074447893..0fb791a3a28 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_yolov3.py @@ -168,12 +168,10 @@ class TestYolov3(unittest.TestCase): def test_dygraph_static_same_loss(self): dygraph_loss = train(to_static=False) static_loss = train(to_static=True) - self.assertTrue(np.allclose(dygraph_loss, - static_loss, - atol=1e-5, - rtol=1e-3), - msg="dygraph_loss: {} \nstatic_loss: {}".format( - dygraph_loss, static_loss)) + np.testing.assert_allclose(dygraph_loss, + static_loss, + rtol=0.001, + atol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft.py b/python/paddle/fluid/tests/unittests/fft/test_fft.py index f7cc9fbf4a1..374db023e29 100644 --- a/python/paddle/fluid/tests/unittests/fft/test_fft.py +++ b/python/paddle/fluid/tests/unittests/fft/test_fft.py @@ -112,12 +112,13 @@ class TestFft(unittest.TestCase): """Test fft with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.fft(self.x, self.n, self.axis, self.norm), - paddle.fft.fft(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose(scipy.fft.fft(self.x, self.n, self.axis, + self.norm), + paddle.fft.fft(paddle.to_tensor(self.x), + self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -138,13 +139,13 @@ class TestIfft(unittest.TestCase): """Test ifft with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.ifft(self.x, self.n, self.axis, - self.norm), - paddle.fft.ifft(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - 
rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose(scipy.fft.ifft(self.x, self.n, self.axis, + self.norm), + paddle.fft.ifft(paddle.to_tensor(self.x), + self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -190,13 +191,13 @@ class TestFft2(unittest.TestCase): """Test fft2 with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.fft2(self.x, self.n, self.axis, - self.norm), - paddle.fft.fft2(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose(scipy.fft.fft2(self.x, self.n, self.axis, + self.norm), + paddle.fft.fft2(paddle.to_tensor(self.x), + self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -712,13 +713,13 @@ class TestRfft(unittest.TestCase): """Test rfft with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.rfft(self.x, self.n, self.axis, - self.norm), - paddle.fft.rfft(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose(scipy.fft.rfft(self.x, self.n, self.axis, + self.norm), + paddle.fft.rfft(paddle.to_tensor(self.x), + self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -764,13 +765,12 @@ class TestRfft2(unittest.TestCase): """Test rfft2 with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.rfft2(self.x, self.n, self.axis, - self.norm), - paddle.fft.rfft2(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose( + scipy.fft.rfft2(self.x, self.n, self.axis, self.norm), + paddle.fft.rfft2(paddle.to_tensor(self.x), self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -821,13 +821,12 @@ class TestRfftn(unittest.TestCase): """Test rfftn with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.rfftn(self.x, self.n, self.axis, - self.norm), - paddle.fft.rfftn(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose( + scipy.fft.rfftn(self.x, self.n, self.axis, self.norm), + paddle.fft.rfftn(paddle.to_tensor(self.x), self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) @@ -980,13 +979,12 @@ class TestIhfftn(unittest.TestCase): """Test ihfftn with norm condition """ with paddle.fluid.dygraph.guard(self.place): - self.assertTrue( - np.allclose(scipy.fft.ihfftn(self.x, self.n, self.axis, - self.norm), - paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n, - self.axis, self.norm), - rtol=RTOL.get(str(self.x.dtype)), - atol=ATOL.get(str(self.x.dtype)))) + np.testing.assert_allclose( + scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm), + paddle.fft.ihfftn(paddle.to_tensor(self.x), self.n, self.axis, + self.norm), + rtol=RTOL.get(str(self.x.dtype)), + atol=ATOL.get(str(self.x.dtype))) @place(DEVICES) diff --git 
a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py index fb3fcbf5fe4..70d51940865 100644 --- a/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/custom_ops/test_custom_leaky_relu_ipu.py @@ -114,8 +114,10 @@ class TestBase(IPUOpTest): res0 = self._test_base(False) res1 = self._test_base(True) - self.assertTrue( - np.allclose(res0.flatten(), res1.flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1.flatten(), + rtol=1e-05, + atol=self.atol) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py index 23ba121a07f..098f3d6b2ba 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py @@ -122,7 +122,7 @@ class TestBase(IPUD2STest): def test_training(self): cpu_loss = self._test(False).flatten() ipu_loss = self._test(True).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-2)) + np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=0.01) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py index 8b3e0104c29..5a7d0836d6f 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py @@ -114,7 +114,7 @@ class TestBase(IPUD2STest): def test_training(self): ipu_loss = self._test(True).flatten() cpu_loss = self._test(False).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4)) + np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-4) class TestSaveLoad(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py index 5df7bbadebf..f3b1d227d95 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_eval_model_ipu.py @@ -120,7 +120,10 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() self.assertTrue(ipu_loss[0] == ipu_loss[99]) - self.assertTrue(np.allclose(ipu_loss[100:], cpu_loss, atol=self.atol)) + np.testing.assert_allclose(ipu_loss[100:], + cpu_loss, + rtol=1e-05, + atol=self.atol) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py index 9a44a9e7c30..187c63fef5b 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_identity_loss_ipu.py @@ -93,7 +93,10 @@ class TestBase(IPUOpTest): # none cpu_res = self.feed['x'] - self.assertTrue(np.allclose(ipu_res[0], cpu_res, atol=self.atol)) + np.testing.assert_allclose(ipu_res[0], + cpu_res, + rtol=1e-05, + atol=self.atol) def test_base(self): # TODO: use string instead of int for reduction diff --git a/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py index d3a700b6296..4a1aa8cba44 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_inference_model_io_ipu.py @@ -142,7 +142,7 @@ class TestBase(IPUOpTest): cpu_res = self._test_load(False) ipu_res = self._test_load(True) - self.assertTrue(np.allclose(cpu_res, ipu_res, atol=self.atol)) + np.testing.assert_allclose(cpu_res, ipu_res, rtol=1e-05, atol=self.atol) self.attrs['path'].cleanup() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py index f4a48cf1340..f04a3bb55e5 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_ipu_shard_api_ipu.py @@ -59,8 +59,10 @@ class TestIpuShard(unittest.TestCase): def test_ipu_shard(self): ipu_index_list = self._test() expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) class TestIpuPipeline(unittest.TestCase): @@ -102,8 +104,10 @@ class TestIpuPipeline(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 2, 3, 1, 2, 1, 2] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py index 6c663bd5ac9..137f3f61c53 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_lr_sheduler_ipu.py @@ -84,7 +84,7 @@ class TestConvNet(IPUOpTest): ipu_loss = self.run_model(True).flatten() cpu_loss = self.run_model(False).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-10)) + np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-10) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py index 6ffb05dfd25..21aa7c4b992 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py @@ -85,8 +85,10 @@ class TestBase(IPUOpTest): def test_base(self): res0 = self.run_model(False) res1 = self.run_model(True) - self.assertTrue( - np.allclose(res0.flatten(), res1.flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1.flatten(), + rtol=1e-05, + atol=self.atol) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py index 253a87a6b7f..55a6569b7dd 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_model_parallel_ipu.py @@ -126,7 +126,10 @@ class TestBase(IPUOpTest): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True) - self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol)) + np.testing.assert_allclose(cpu_outputs, + ipu_outputs, + rtol=1e-05, + atol=self.atol) class TestReplicaInference(TestBase): @@ -255,7 +258,10 @@ class TestReplicaTrain(TestTrainBase): cpu_outputs = self._test_base(False) ipu_outputs = 
self._test_base(True)[::2] - self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol)) + np.testing.assert_allclose(cpu_outputs, + ipu_outputs, + rtol=1e-05, + atol=self.atol) class TestReplicaCollectiveTrain(TestTrainBase): @@ -293,7 +299,10 @@ class TestReplicaCollectiveTrain(TestTrainBase): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True)[::2] - self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol)) + np.testing.assert_allclose(cpu_outputs, + ipu_outputs, + rtol=1e-05, + atol=self.atol) class TestPipelineTrain(TestTrainBase): @@ -322,7 +331,10 @@ class TestPipelineTrain(TestTrainBase): cpu_outputs = self._test_base(False) ipu_outputs = self._test_base(True)[::3] - self.assertTrue(np.allclose(cpu_outputs, ipu_outputs, atol=self.atol)) + np.testing.assert_allclose(cpu_outputs, + ipu_outputs, + rtol=1e-05, + atol=self.atol) class TestAdamTrain(TestTrainBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py index 5041e8804a0..f14b2174d39 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_optimizer_ipu.py @@ -118,7 +118,10 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol)) + np.testing.assert_allclose(ipu_loss, + cpu_loss, + rtol=1e-05, + atol=self.atol) @unittest.skip('do not support L2 regularization') diff --git a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py index 33f4a331611..57935c8a657 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py @@ -172,7 +172,7 @@ class TestD2S(IPUD2STest): def test_training(self): ipu_loss = self._test(True).flatten() cpu_loss = self._test(False).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=1e-4)) + np.testing.assert_allclose(ipu_loss, cpu_loss, rtol=1e-05, atol=1e-4) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py index ea4190e251f..1e8387ac228 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_save_load_ipu.py @@ -112,8 +112,10 @@ class TestBase(IPUOpTest): def test_base(self): res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, True) res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, False) - self.assertTrue( - np.allclose(res0.flatten(), res1.flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1.flatten(), + rtol=1e-05, + atol=self.atol) self.attrs['model_path'].cleanup() @@ -188,8 +190,10 @@ class TestSGDFP16(TestBase): def test_base(self): res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, True) res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, False) - self.assertTrue( - np.allclose(res0.flatten(), res1.flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1.flatten(), + rtol=1e-05, + atol=self.atol) self.attrs['model_path'].cleanup() diff --git a/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py b/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py index ca1cdb40731..37423fda1fa 100644 --- 
a/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_set_ipu_shard_api.py @@ -74,8 +74,10 @@ class TestSetIpuShard(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) class TestSetIpuPipeline(unittest.TestCase): @@ -107,8 +109,10 @@ class TestSetIpuPipeline(unittest.TestCase): ipu_index_list = self._test() expected_ipu_index_list = [1, 1, 2, 3, 3, 3, 4, 4] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) class TestSetIpuShardAndPipeline(unittest.TestCase): @@ -147,8 +151,10 @@ class TestSetIpuShardAndPipeline(unittest.TestCase): 1, 1, 2, 3, 3, 3, 4, 4, 2, 2, 3, 4, 4, 4, 1, 1 ] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) class TestSetIpuForModel(unittest.TestCase): @@ -182,8 +188,10 @@ class TestSetIpuForModel(unittest.TestCase): 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 ] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) class TestSetIpuMixedModel(unittest.TestCase): @@ -234,8 +242,10 @@ class TestSetIpuMixedModel(unittest.TestCase): 1, 1, 2, 2, 2, 3, 4, 4, 2, 2, 3, 3, 3, 4, 1, 1 ] - self.assertTrue( - np.allclose(ipu_index_list, expected_ipu_index_list, atol=0)) + np.testing.assert_allclose(ipu_index_list, + expected_ipu_index_list, + rtol=1e-05, + atol=0) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py index 495bc0d656a..56e7eef4cb2 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_varname_inplace_ipu.py @@ -95,8 +95,10 @@ class TestBase(IPUOpTest): res0 = self._test_base(True) res1 = self._test_base(False) - self.assertTrue( - np.allclose(res0.flatten(), res1.flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1.flatten(), + rtol=1e-05, + atol=self.atol) self.assertTrue(res0.shape == res1.shape) diff --git a/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py index c121e6358d3..60594298321 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_weight_decay_ipu.py @@ -121,7 +121,10 @@ class TestBase(IPUOpTest): ipu_loss = self._test_optimizer(True).flatten() cpu_loss = self._test_optimizer(False).flatten() - self.assertTrue(np.allclose(ipu_loss, cpu_loss, atol=self.atol)) + np.testing.assert_allclose(ipu_loss, + cpu_loss, + rtol=1e-05, + atol=self.atol) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py index c06880b9808..4fe9ef12733 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_weight_sharing_ipu.py @@ -102,8 +102,10 
@@ class TestWeightSharing(IPUOpTest): res0 = self.run_model(False) res1 = self.run_model(True) - self.assertTrue( - np.allclose(res0.flatten(), res1[0].flatten(), atol=self.atol)) + np.testing.assert_allclose(res0.flatten(), + res1[0].flatten(), + rtol=1e-05, + atol=self.atol) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py index 92f1d2cc840..715a370f53d 100755 --- a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py @@ -136,9 +136,12 @@ class AutoScanTest(unittest.TestCase): "The output shapes are not equal, the baseline shape is " + str(baseline[key].shape) + ', but got ' + str(arr.shape)) diff = abs(baseline[key] - arr) - self.assertTrue( - np.allclose(baseline[key], arr, atol=atol, rtol=rtol), - "Output has diff, Maximum absolute error: {}".format( + np.testing.assert_allclose( + baseline[key], + arr, + rtol=rtol, + atol=atol, + err_msg='Output has diff, Maximum absolute error: {}'.format( np.amax(diff))) @abc.abstractmethod diff --git a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py index 91c7a8963c4..5393014fe7d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py @@ -192,9 +192,13 @@ class InferencePassTest(unittest.TestCase): paddle_out = paddle_out.flatten() inference_out = inference_out.flatten() - self.assertTrue( - np.allclose(paddle_out, inference_out, atol=atol), - "Output has diff between inference and training forward at {} ". + np.testing.assert_allclose( + paddle_out, + inference_out, + rtol=1e-05, + atol=atol, + err_msg= + 'Output has diff between inference and training forward at {} '. format(device)) # Check whether the trt results and the GPU results are the same. @@ -220,12 +224,12 @@ class InferencePassTest(unittest.TestCase): paddle_out = paddle_out.flatten() tensorrt_output = tensorrt_output.flatten() - self.assertTrue( - np.allclose(paddle_out, - tensorrt_output, - rtol=rtol, - atol=atol), - "Output has diff between GPU and TensorRT. ") + np.testing.assert_allclose( + paddle_out, + tensorrt_output, + rtol=rtol, + atol=atol, + err_msg='Output has diff between GPU and TensorRT. ') # Check whether the mkldnn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: @@ -240,9 +244,12 @@ class InferencePassTest(unittest.TestCase): if self.enable_mkldnn_bfloat16: atol = 0.01 for paddle_out, mkldnn_output in zip(paddle_outs, mkldnn_outputs): - self.assertTrue( - np.allclose(np.array(paddle_out), mkldnn_output, atol=atol), - "Output has diff between CPU and MKLDNN. ") + np.testing.assert_allclose( + np.array(paddle_out), + mkldnn_output, + rtol=1e-05, + atol=atol, + err_msg='Output has diff between CPU and MKLDNN. 
') class TensorRTParam: ''' diff --git a/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py b/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py index b42a54e5efe..6516206350b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/quant_dequant_test.py @@ -290,9 +290,13 @@ class QuantDequantTest(unittest.TestCase): paddle_out = paddle_out.flatten() inference_out = inference_out.flatten() - self.assertTrue( - np.allclose(paddle_out, inference_out, atol=atol), - "Output has diff between inference and training forward at {} ". + np.testing.assert_allclose( + paddle_out, + inference_out, + rtol=1e-05, + atol=atol, + err_msg= + 'Output has diff between inference and training forward at {} '. format(device)) # Check whether the trt results and the GPU results are the same. @@ -319,12 +323,12 @@ class QuantDequantTest(unittest.TestCase): paddle_out = paddle_out.flatten() tensorrt_output = tensorrt_output.flatten() - self.assertTrue( - np.allclose(paddle_out, - tensorrt_output, - rtol=rtol, - atol=atol), - "Output has diff between GPU and TensorRT. ") + np.testing.assert_allclose( + paddle_out, + tensorrt_output, + rtol=rtol, + atol=atol, + err_msg='Output has diff between GPU and TensorRT. ') # Check whether the mkldnn results and the CPU results are the same. if (not use_gpu) and self.enable_mkldnn: @@ -339,9 +343,12 @@ class QuantDequantTest(unittest.TestCase): if self.enable_mkldnn_bfloat16: atol = 0.01 for paddle_out, mkldnn_output in zip(paddle_outs, mkldnn_outputs): - self.assertTrue( - np.allclose(np.array(paddle_out), mkldnn_output, atol=atol), - "Output has diff between CPU and MKLDNN. ") + np.testing.assert_allclose( + np.array(paddle_out), + mkldnn_output, + rtol=1e-05, + atol=atol, + err_msg='Output has diff between CPU and MKLDNN. 
') class TensorRTParam: ''' diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py index 03260a22416..8394a3b7069 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_multiclass_nms3.py @@ -163,9 +163,12 @@ class TrtConvertMulticlassNMS3Test(TrtLayerAutoScanTest): "The output shapes are not equal, the baseline shape is " + str(basline_arr.shape) + ', but got ' + str(arr.shape)) diff = abs(basline_arr - arr) - self.assertTrue( - np.allclose(basline_arr, arr, atol=atol, rtol=rtol), - "Output has diff, Maximum absolute error: {}".format( + np.testing.assert_allclose( + basline_arr, + arr, + rtol=rtol, + atol=atol, + err_msg='Output has diff, Maximum absolute error: {}'.format( np.amax(diff))) def assert_op_size(self, trt_engine_num, paddle_op_num): diff --git a/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py b/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py index 16f9ecad6d2..40697f0a6e3 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py +++ b/python/paddle/fluid/tests/unittests/ir/test_fuse_resnet_unit.py @@ -62,4 +62,7 @@ class TestFuseResNetUnit(unittest.TestCase): feed = {"x": np.random.randn(1, 64, 64, 8).astype("float16")} before_out = exe.run(program, feed=feed, fetch_list=[out.name]) after_out = exe.run(after_program, feed=feed, fetch_list=[out.name]) - self.assertTrue(np.allclose(before_out[0], after_out[0], atol=5e-3)) + np.testing.assert_allclose(before_out[0], + after_out[0], + rtol=1e-05, + atol=0.005) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py index 7c6ab5d9462..e4d8f1b32a7 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_generate_pass.py @@ -291,7 +291,7 @@ class TestGeneratePass(unittest.TestCase): after_out = executor.run(after_program, feed=feed, fetch_list=[out.name]) - self.assertTrue(np.allclose(before_out, after_out)) + np.testing.assert_allclose(before_out, after_out, rtol=1e-05) def test_multi_add_to_sum(self): paddle.enable_static() @@ -327,8 +327,8 @@ class TestGeneratePass(unittest.TestCase): after_out1, after_out2 = executor.run(after_program, feed=feed, fetch_list=[out1.name, out2.name]) - self.assertTrue(np.allclose(before_out1, after_out1)) - self.assertTrue(np.allclose(before_out2, after_out2)) + np.testing.assert_allclose(before_out1, after_out1, rtol=1e-05) + np.testing.assert_allclose(before_out2, after_out2, rtol=1e-05) def test_generate_combine_mul_v2(self): helper = ir.RegisterPassHelper([generate_combine_mul_v2()]) @@ -369,7 +369,7 @@ class TestGeneratePass(unittest.TestCase): after_out = executor.run(after_program, feed=feed, fetch_list=[out.name]) - self.assertTrue(np.allclose(before_out, after_out)) + np.testing.assert_allclose(before_out, after_out, rtol=1e-05) def test_generate_simplify_inference(self): self.check_generate_simplify_inference("generate_simplify_inference_v1") @@ -405,4 +405,4 @@ class TestGeneratePass(unittest.TestCase): after_out = executor.run(after_program, feed=feed, fetch_list=[out.name]) - self.assertTrue(np.allclose(before_out, after_out)) + np.testing.assert_allclose(before_out, after_out, rtol=1e-05) diff --git 
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py
index 2cad7cd8cc7..c1a90bcf1ac 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py
@@ -38,8 +38,11 @@ _set_use_system_allocator(True)
 class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp):
 
     def __assert_close(self, tensor, np_array, msg, rtol=2e-02, atol=2):
-        self.assertTrue(
-            np.allclose(np.array(tensor), np_array, rtol=rtol, atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor),
+                                   np_array,
+                                   rtol=rtol,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_forward(self,
                       shape,
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py
index 98e44f8f745..7ad3b0a88a2 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_mkldnn_op.py
@@ -57,7 +57,11 @@ class TestLayerNormMKLDNNOp(unittest.TestCase):
         self.use_mkldnn = True
 
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
-        self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
+        np.testing.assert_allclose(np.array(tensor),
+                                   np_array,
+                                   rtol=1e-05,
+                                   atol=atol,
+                                   err_msg=msg)
 
     def check_forward(self,
                       shape,
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
index 336ee80c1fc..04a71ee2d75 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
@@ -360,8 +360,10 @@ class TestReQuantizeOpReused(TestReQuantizeOp):
                       feed={'input': variables['input']},
                       fetch_list=['output'])
 
-        self.assertTrue(np.allclose(variables['output'], out[0], atol=1e-4),
-                        'output')
+        np.testing.assert_allclose(variables['output'],
+                                   out[0],
+                                   rtol=1e-05,
+                                   atol=1e-4)
 
 
 # ---------------test reused requantize op, no shift------------------------
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py
index 33d9af4e0e2..4c1d81b3c5b 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py
@@ -80,9 +80,13 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase):
         sum_op.run(scope, place)
         out = scope.find_var("x0").get_tensor()
         out_array = np.array(out)
-        self.assertTrue(
-            np.allclose(expected_out, out_array, atol=1e-5),
-            "Inplace sum_mkldnn_op output has diff with expected output")
+        np.testing.assert_allclose(
+            expected_out,
+            out_array,
+            rtol=1e-05,
+            atol=1e-05,
+            err_msg='Inplace sum_mkldnn_op output has diff with expected output'
+        )
 
     def test_check_grad(self):
         pass
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py
index 45a79b5ece5..f54667df53f 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_arg_max_op_mlu.py
@@ -345,8 +345,9 @@ class TestArgMaxAPI(unittest.TestCase):
             tensor_input = paddle.to_tensor(numpy_input)
             numpy_output = np.argmax(numpy_input, axis=self.axis)
             paddle_output = paddle.argmax(tensor_input, axis=self.axis)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             paddle.enable_static()
 
         for place in self.place:
@@ -378,8 +379,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
             paddle_output = paddle.argmax(tensor_input,
                                           axis=self.axis,
                                           keepdim=self.keep_dims)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
             paddle.enable_static()
 
@@ -407,8 +409,9 @@ class TestArgMaxAPI_3(unittest.TestCase):
             tensor_input = paddle.to_tensor(numpy_input)
             numpy_output = np.argmax(numpy_input).reshape([1])
             paddle_output = paddle.argmax(tensor_input)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
             paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
index b4f58a7c5f0..27d25c6146e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
@@ -199,8 +199,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
                 channel_first_x = paddle.transpose(x, [0, 2, 1])
                 y2 = net2(channel_first_x)
                 y2 = paddle.transpose(y2, [0, 2, 1])
-                self.assertEqual(
-                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
+                np.testing.assert_allclose(y1.numpy(),
+                                           y2.numpy(),
+                                           rtol=1e-05,
+                                           atol=1e-07)
 
     def test_2d(self):
         for p in self.places:
@@ -214,8 +216,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
                 channel_first_x = paddle.transpose(x, [0, 3, 1, 2])
                 y2 = net2(channel_first_x)
                 y2 = paddle.transpose(y2, [0, 2, 3, 1])
-                self.assertEqual(
-                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
+                np.testing.assert_allclose(y1.numpy(),
+                                           y2.numpy(),
+                                           rtol=1e-05,
+                                           atol=1e-07)
 
     def test_3d(self):
         for p in self.places:
@@ -229,8 +233,10 @@ class TestBatchNormChannelLast(unittest.TestCase):
                 channel_first_x = paddle.transpose(x, [0, 4, 1, 2, 3])
                 y2 = net2(channel_first_x)
                 y2 = paddle.transpose(y2, [0, 2, 3, 4, 1])
-                self.assertEqual(
-                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
+                np.testing.assert_allclose(y1.numpy(),
+                                           y2.numpy(),
+                                           rtol=1e-05,
+                                           atol=1e-07)
                 # res = np.allclose(y1.numpy(), y2.numpy())
                 # if res == False:
                 #     np.savetxt("./y1.txt", y1.numpy().flatten(), fmt='%.10f', delimiter='\n')
@@ -270,7 +276,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase):
                 net2.training = False
                 y1 = net1(x)
                 y2 = net2(x)
-                self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True)
+                np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-05)
 
 
 class TestBatchNormUseGlobalStatsCase1(TestBatchNormUseGlobalStats):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
index acc711ffdbd..cf660a372ad 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_gaussian_random_op_mlu.py
@@ -65,12 +65,7 @@ class TestGaussianRandomOp(OpTest):
         hist2, _ = np.histogram(data, range=(-3, 5))
         hist2 = hist2.astype("float32")
         hist2 /= float(outs[0].size)
-        np.testing.assert_allclose(hist,
-                                   hist2,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="hist: " + str(hist) + " hist2: " +
-                                   str(hist2))
+        np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
 
 
 class TestMeanStdAreInt(TestGaussianRandomOp):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
index b8272e3bce9..1e4a9726ce8 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py
@@ -511,10 +511,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
             place=place, use_amp=use_amp, use_multi_tensor=True)
         output2, params2 = self._momentum_optimize_dygraph(
             place=place, use_amp=use_amp, use_multi_tensor=False)
-        self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
+        np.testing.assert_allclose(output1, output2, rtol=1e-05)
         for idx in range(len(params1)):
-            self.assertEqual(
-                np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
 
     def _check_with_param_arrt(self, place, use_amp):
         output1, params1 = self._momentum_optimize_dygraph(
@@ -527,10 +526,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
             use_amp=use_amp,
             use_param_attr=True,
             use_multi_tensor=False)
-        self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
+        np.testing.assert_allclose(output1, output2, rtol=1e-05)
         for idx in range(len(params1)):
-            self.assertEqual(
-                np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
 
     def _check_with_param_group(self, place, use_amp):
         output1, params1 = self._momentum_optimize_dygraph(
@@ -543,10 +541,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
             use_amp=use_amp,
             use_param_group=True,
             use_multi_tensor=False)
-        self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
+        np.testing.assert_allclose(output1, output2, rtol=1e-05)
         for idx in range(len(params1)):
-            self.assertEqual(
-                np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
 
     def test_main(self):
         for place in self._get_places():
@@ -619,8 +616,7 @@ class TestMultiTensorMomentumStatic(unittest.TestCase):
             use_amp=use_amp,
             use_multi_tensor=False)
         for idx in range(len(output1)):
-            self.assertEqual(
-                np.allclose(output1[idx], output2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05)
 
     def test_main(self):
         for place in self._get_places():
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py
index 766b88aa154..969b66179f5 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py
@@ -149,7 +149,7 @@ class TestSoftmaxAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_check(self):
         paddle.disable_static(self.place)
@@ -161,7 +161,7 @@ class TestSoftmaxAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = self.softmax(x, axis=0)
         x = paddle.to_tensor(self.x_np)
@@ -169,11 +169,11 @@ class TestSoftmaxAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out = self.softmax(x, dtype=np.float32)
         out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
-        self.assertEqual(np.allclose(out_ref, out.numpy()), True)
+        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
 
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py
index 400d2f4afed..cafc83ae75e 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py
@@ -155,10 +155,12 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase):
                 x = paddle.to_tensor(data)
                 bn_out = bn_model(x)
                 sybn_out = sybn_model(x)
-                self.assertTrue(
-                    np.allclose(bn_out.numpy(), sybn_out.numpy()),
-                    "Output has diff. \n" + "\nBN " + str(bn_out.numpy()) +
-                    "\n" + "Sync BN " + str(sybn_out.numpy()))
+                np.testing.assert_allclose(
+                    bn_out.numpy(),
+                    sybn_out.numpy(),
+                    rtol=1e-05,
+                    err_msg='Output has diff. \n' + '\nBN ' +
+                    str(bn_out.numpy()) + '\n' + 'Sync BN ' + str(sybn_out.numpy()))
 
 
 class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
index ca1b7b3e602..35a3546a354 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_uniform_random_op_mlu.py
@@ -71,11 +71,7 @@ class TestMLUUniformRandomOp(OpTest):
 
     def verify_output(self, outs):
         hist, prob = self.output_hist(np.array(outs[0]))
-        np.testing.assert_allclose(hist,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="hist: " + str(hist))
+        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
 
 
 class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
@@ -103,11 +99,7 @@ class TestMLUUniformRandomOpSelectedRows(unittest.TestCase):
         op.run(scope, place)
         self.assertEqual(out.get_tensor().shape(), [1000, 784])
         hist, prob = output_hist(np.array(out.get_tensor()))
-        np.testing.assert_allclose(hist,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="hist: " + str(hist))
+        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py
index 12da1794e4c..9c812d6b857 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_arg_max_op_npu.py
@@ -313,8 +313,9 @@ class TestArgMaxAPI(unittest.TestCase):
             tensor_input = paddle.to_tensor(numpy_input)
             numpy_output = np.argmax(numpy_input, axis=self.axis)
             paddle_output = paddle.argmax(tensor_input, axis=self.axis)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             paddle.enable_static()
 
         for place in self.place:
@@ -346,8 +347,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
             paddle_output = paddle.argmax(tensor_input,
                                           axis=self.axis,
                                           keepdim=self.keep_dims)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
             paddle.enable_static()
 
@@ -375,8 +377,9 @@ class TestArgMaxAPI_3(unittest.TestCase):
             tensor_input = paddle.to_tensor(numpy_input)
             numpy_output = np.argmax(numpy_input).reshape([1])
             paddle_output = paddle.argmax(tensor_input)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
             paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py
index b129c673c32..b42bdffd1de 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_arg_min_op_npu.py
@@ -254,8 +254,9 @@ class TestArgMinAPI(unittest.TestCase):
             tensor_input = paddle.to_tensor(numpy_input)
             numpy_output = np.argmin(numpy_input, axis=self.axis)
             paddle_output = paddle.argmin(tensor_input, axis=self.axis)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             paddle.enable_static()
 
         for place in self.place:
@@ -287,8 +288,9 @@ class TestArgMaxAPI_2(unittest.TestCase):
             paddle_output = paddle.argmin(tensor_input,
                                           axis=self.axis,
                                           keepdim=self.keep_dims)
-            self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()),
-                             True)
+            np.testing.assert_allclose(numpy_output,
+                                       paddle_output.numpy(),
+                                       rtol=1e-05)
             self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
             paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
index 581b0793af2..91294f6dcd6 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_assign_value_op_npu.py
@@ -99,10 +99,7 @@ class TestAssignApi(unittest.TestCase):
         exe = fluid.Executor(self.place)
         [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
 
-        np.testing.assert_allclose(fetched_x,
-                                   self.value,
-                                   err_msg="fetch_x=%s val=%s" %
-                                   (fetched_x, self.value))
+        np.testing.assert_allclose(fetched_x, self.value)
         self.assertEqual(fetched_x.dtype, self.value.dtype)
diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py
index 763f5db52b2..78f276bfe92 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py
@@ -178,7 +178,7 @@ class TestRemainderOp(unittest.TestCase):
             y = paddle.to_tensor(np_y)
             z = x % y
             z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
-            self.assertEqual(np.allclose(z_expected, z.numpy()), True)
+            np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05)
 
             np_x = np.array([-3, 11, -2, 3])
             np_y = np.array([-1, 2, 3, -2])
@@ -186,7 +186,7 @@ class TestRemainderOp(unittest.TestCase):
             y = paddle.to_tensor(np_y, dtype="int64")
             z = x % y
             z_expected = np.array([0, 1, 1, -1])
-            self.assertEqual(np.allclose(z_expected, z.numpy()), True)
+            np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05)
 
 
 if __name__ == '__main__':
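A note on the rtol=0 pattern in the random-op tests above: setting rtol=0 turns assert_allclose into a purely absolute comparison, and the hand-built err_msg strings that merely echoed the arrays can be dropped because the assertion already prints both arrays and the mismatch statistics on failure. A small sketch with made-up histogram values:

    import numpy as np

    hist = np.array([0.095, 0.108])
    prob = np.array([0.100, 0.100])

    # With rtol=0 the relative term vanishes, so this checks
    # |hist - prob| <= 0.01 elementwise.
    np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)

    # On failure, assert_allclose reports the mismatched elements itself;
    # an err_msg argument is only appended to that report.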
diff --git a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py
index 8feca4805bc..975806263e7 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py
@@ -77,12 +77,7 @@ class TestMultinomialOp(OpTest):
         # normalize the input to get the probability
         prob = self.input_np / self.input_np.sum(axis=-1, keepdims=True)
         sample_prob = self.sample_output(np.array(outs[0]))
-        np.testing.assert_allclose(sample_prob,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="sample_prob: " + str(sample_prob) +
-                                   "\nprob: " + str(prob))
+        np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
 
 
 class TestMultinomialOp2(TestMultinomialOp):
@@ -125,12 +120,7 @@ class TestMultinomialApi(unittest.TestCase):
         sample_prob = sample_output_one_dimension(out.numpy(), 4)
         prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
-        np.testing.assert_allclose(sample_prob,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="sample_prob: " + str(sample_prob) +
-                                   "\nprob: " + str(prob))
+        np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
         paddle.enable_static()
 
     def test_dygraph2(self):
@@ -143,12 +133,7 @@ class TestMultinomialApi(unittest.TestCase):
         sample_prob = sample_output_two_dimension(out.numpy(), [3, 4])
         prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True)
-        np.testing.assert_allclose(sample_prob,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="sample_prob: " + str(sample_prob) +
-                                   "\nprob: " + str(prob))
+        np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
         paddle.enable_static()
 
     def test_dygraph3(self):
@@ -191,12 +176,7 @@ class TestMultinomialApi(unittest.TestCase):
         sample_prob = sample_output_one_dimension(out, 4)
         prob = x_np / x_np.sum(axis=-1, keepdims=True)
-        np.testing.assert_allclose(sample_prob,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="sample_prob: " + str(sample_prob) +
-                                   "\nprob: " + str(prob))
+        np.testing.assert_allclose(sample_prob, prob, rtol=0, atol=0.01)
 
 
 class TestMultinomialAlias(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py
index 450cb542943..b004052f396 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py
@@ -105,7 +105,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase):
         out_ref = np.array(
             np.take_along_axis(self.x_np, self.index_np, self.axis))
         for out in res:
-            self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True)
+            np.testing.assert_allclose(out, out_ref, rtol=0.001)
 
     def test_api_dygraph(self):
         paddle.disable_static(self.place)
@@ -114,7 +114,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase):
         out = paddle.take_along_axis(x_tensor, self.index, self.axis)
         out_ref = np.array(
             np.take_along_axis(self.x_np, self.index_np, self.axis))
-        self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), True)
+        np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001)
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py
index 30d4b82c7f4..26329fd52ba 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_uniform_random_op_npu.py
@@ -71,11 +71,7 @@ class TestNPUUniformRandomOp(OpTest):
 
     def verify_output(self, outs):
         hist, prob = self.output_hist(np.array(outs[0]))
-        np.testing.assert_allclose(hist,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="hist: " + str(hist))
+        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
 
 
 class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
@@ -103,11 +99,7 @@ class TestNPUUniformRandomOpSelectedRows(unittest.TestCase):
         op.run(scope, place)
         self.assertEqual(out.get_tensor().shape(), [1000, 784])
         hist, prob = output_hist(np.array(out.get_tensor()))
-        np.testing.assert_allclose(hist,
-                                   prob,
-                                   rtol=0,
-                                   atol=0.01,
-                                   err_msg="hist: " + str(hist))
+        np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 1bebbe15465..bed1fe8841b 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -728,11 +728,14 @@ class OpTest(unittest.TestCase):
         for name in api_outs:
             np_api = np.array(api_outs[name])
             np_dyg = np.array(dygraph_outs[name])
-            self.assertTrue(
-                np.allclose(np_api, np_dyg, equal_nan=False),
-                "Output (" + name + ") has diff at " + str(place) +
-                "\nExpect " + str(np_dyg) + "\n" + "But Got" + str(np_api) +
-                " in class " + self.__class__.__name__)
+            np.testing.assert_allclose(
+                np_api,
+                np_dyg,
+                rtol=1e-05,
+                equal_nan=False,
+                err_msg='Output (' + name + ') has diff at ' + str(place) +
+                '\nExpect ' + str(np_dyg) + '\n' + 'But Got' + str(np_api) +
+                ' in class ' + self.__class__.__name__)
 
     def _calc_python_api_output(self, place, egr_inps=None, egr_oups=None):
         """ set egr_inps and egr_oups = None if you want to create it by yourself.
@@ -1041,12 +1044,15 @@ class OpTest(unittest.TestCase):
                 expect_out = np.array(expect_outs[i])
                 actual_out = np.array(actual_outs[i])
                 if inplace_atol is not None:
-                    self.assertTrue(
-                        np.allclose(expect_out, actual_out, atol=inplace_atol),
-                        "Output (" + name + ") has diff at " + str(place) +
-                        " when using and not using inplace" + "\nExpect " +
-                        str(expect_out) + "\n" + "But Got" + str(actual_out) +
-                        " in class " + self.__class__.__name__)
+                    np.testing.assert_allclose(
+                        expect_out,
+                        actual_out,
+                        rtol=1e-05,
+                        atol=inplace_atol,
+                        err_msg='Output (' + name + ') has diff at ' + str(place) +
+                        ' when using and not using inplace' + '\nExpect ' +
+                        str(expect_out) + '\n' + 'But Got' + str(actual_out) +
+                        ' in class ' + self.__class__.__name__)
                 else:
                     np.testing.assert_array_equal(
                         expect_out,
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index cdb01b4c994..eeb447e8ffe 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -132,7 +132,7 @@ class TestExpm1API(unittest.TestCase):
             exe = paddle.static.Executor(place)
             res = exe.run(feed={'X': self.x})
             for r in res:
-                self.assertEqual(np.allclose(self.out_ref, r), True)
+                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)
 
         for place in self.place:
             run(place)
@@ -143,7 +143,7 @@ class TestExpm1API(unittest.TestCase):
             paddle.disable_static(place)
             X = paddle.to_tensor(self.x)
             out = paddle.expm1(X)
-            self.assertEqual(np.allclose(self.out_ref, out.numpy()), True)
+            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
             paddle.enable_static()
 
         for place in self.place:
@@ -168,7 +168,7 @@ class TestParameter(object):
             exe = fluid.Executor(place)
             result, = exe.run(feed={"X": np_x}, fetch_list=[out])
             expected = eval("np.%s(np_x)" % self.op_type)
-            self.assertTrue(np.allclose(result, expected))
+            np.testing.assert_allclose(result, expected, rtol=1e-05)
 
     def test_dygraph(self):
         with fluid.dygraph.guard():
@@ -176,7 +176,7 @@ class TestParameter(object):
             x = fluid.dygraph.to_variable(np_x)
             z = eval("paddle.%s(x).numpy()" % self.op_type)
             z_expected = eval("np.%s(np_x)" % self.op_type)
-            self.assertTrue(np.allclose(z, z_expected))
+            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
 
 
 class TestSigmoid(TestActivation):
@@ -270,7 +270,7 @@ class TestSiluAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = self.x_np / (1 + np.exp(-self.x_np))
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -280,7 +280,7 @@ class TestSiluAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = self.x_np / (1 + np.exp(-self.x_np))
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -337,7 +337,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in res:
-            self.assertTrue(np.allclose(out_ref, r))
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -347,7 +347,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in [out1, out2]:
-            self.assertTrue(np.allclose(out_ref, r.numpy()))
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -358,7 +358,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
         exe = paddle.static.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -425,7 +425,7 @@ class TestTanhAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = np.tanh(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -436,7 +436,7 @@ class TestTanhAPI(unittest.TestCase):
         out3 = th(x)
         out_ref = np.tanh(self.x_np)
         for r in [out1, out2, out3]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -447,7 +447,7 @@ class TestTanhAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = np.tanh(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -699,7 +699,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_tanhshrink(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -709,7 +709,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
         out2 = tanhshrink(x)
         out_ref = ref_tanhshrink(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -720,7 +720,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_tanhshrink(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -795,7 +795,7 @@ class TestHardShrinkAPI(unittest.TestCase):
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardshrink(self.x_np, 0.5)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -805,14 +805,14 @@ class TestHardShrinkAPI(unittest.TestCase):
         out2 = hd(x)
         out_ref = ref_hardshrink(self.x_np, 0.5)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = F.hardshrink(x, 0.6)
         hd = paddle.nn.Hardshrink(0.6)
         out2 = hd(x)
         out_ref = ref_hardshrink(self.x_np, 0.6)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -823,7 +823,7 @@ class TestHardShrinkAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardshrink(self.x_np, 0.5)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -869,7 +869,7 @@ class TestHardtanhAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardtanh(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -879,14 +879,14 @@ class TestHardtanhAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_hardtanh(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = F.hardtanh(x, -2.0, 2.0)
         m = paddle.nn.Hardtanh(-2.0, 2.0)
         out2 = m(x)
         out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -956,7 +956,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_softshrink(self.x_np, self.threshold)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -966,7 +966,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
         out2 = softshrink(x)
         out_ref = ref_softshrink(self.x_np, self.threshold)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -977,7 +977,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_softshrink(self.x_np, self.threshold)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -1188,7 +1188,7 @@ class TestTan(TestActivation):
             x = paddle.to_tensor(self.x_np)
             out_test = paddle.tan(x)
             out_ref = np.tan(self.x_np)
-            self.assertTrue(np.allclose(out_ref, out_test.numpy()))
+            np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
             paddle.enable_static()
 
     def test_static_api(self):
@@ -1199,7 +1199,7 @@ class TestTan(TestActivation):
         exe = paddle.static.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = np.tan(self.x_np)
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_backward(self):
         test_data_shape = [11, 17]
@@ -1398,7 +1398,7 @@ class TestReluAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = np.maximum(self.x_np, 0)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -1408,7 +1408,7 @@ class TestReluAPI(unittest.TestCase):
         out2 = self.relu(x)
         out_ref = np.maximum(self.x_np, 0)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -1504,7 +1504,7 @@ class TestLeakyReluAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_leaky_relu(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -1514,14 +1514,14 @@ class TestLeakyReluAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_leaky_relu(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = F.leaky_relu(x, 0.6)
         m = paddle.nn.LeakyReLU(0.6)
         out2 = m(x)
         out_ref = ref_leaky_relu(self.x_np, 0.6)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -1532,7 +1532,7 @@ class TestLeakyReluAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_leaky_relu(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -1619,7 +1619,7 @@ class TestGELUAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = gelu(self.x_np, False)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -1629,14 +1629,14 @@ class TestGELUAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = gelu(self.x_np, False)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = F.gelu(x, True)
         m = paddle.nn.GELU(True)
         out2 = m(x)
         out_ref = gelu(self.x_np, True)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -1703,12 +1703,12 @@ class TestBreluAPI(unittest.TestCase):
             out = paddle.fluid.layers.brelu(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
-            self.assertTrue(np.allclose(self.out_ref, res[0]))
+            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)
 
             paddle.disable_static(self.place)
             x = paddle.to_tensor(self.x_np)
             out = paddle.fluid.layers.brelu(x)
-            self.assertTrue(np.allclose(self.out_ref, out.numpy()))
+            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
             paddle.enable_static()
 
     def test_errors(self):
@@ -1774,7 +1774,7 @@ class TestRelu6API(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_relu6(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -1784,7 +1784,7 @@ class TestRelu6API(unittest.TestCase):
         out2 = relu6(x)
         out_ref = ref_relu6(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -1795,7 +1795,7 @@ class TestRelu6API(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_relu6(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -1869,7 +1869,7 @@ class TestHardswishAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardswish(self.x_np)
         for r in res:
-            self.assertTrue(np.allclose(out_ref, r))
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -1879,7 +1879,7 @@ class TestHardswishAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_hardswish(self.x_np)
         for r in [out1, out2]:
-            self.assertTrue(np.allclose(out_ref, r.numpy()))
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -1889,12 +1889,12 @@ class TestHardswishAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardswish(self.x_np)
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out = paddle.fluid.layers.hard_swish(x)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -2018,7 +2018,7 @@ class TestELUAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = elu(self.x_np, 1.0)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2029,7 +2029,7 @@ class TestELUAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = elu(self.x_np, 1.0)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = self.elu(x, 0.2)
         x = paddle.to_tensor(self.x_np)
@@ -2037,7 +2037,7 @@ class TestELUAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = elu(self.x_np, 0.2)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -2118,7 +2118,7 @@ class TestCELUAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = celu(self.x_np, 1.5)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2129,7 +2129,7 @@ class TestCELUAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = celu(self.x_np, 1.5)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
 
         out1 = self.celu(x, 0.2)
         x = paddle.to_tensor(self.x_np)
@@ -2137,7 +2137,7 @@ class TestCELUAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = celu(self.x_np, 0.2)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -2615,7 +2615,7 @@ class TestSTanhAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2623,7 +2623,7 @@ class TestSTanhAPI(unittest.TestCase):
         out = paddle.stanh(x, self.scale_a, self.scale_b)
         out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
         for r in [out]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -2634,7 +2634,7 @@ class TestSTanhAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -2750,7 +2750,7 @@ class TestSoftplusAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2760,7 +2760,7 @@ class TestSoftplusAPI(unittest.TestCase):
         out2 = softplus(x)
         out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -2771,7 +2771,7 @@ class TestSoftplusAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_softplus(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -2833,7 +2833,7 @@ class TestSoftsignAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_softsign(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2843,7 +2843,7 @@ class TestSoftsignAPI(unittest.TestCase):
         out2 = softsign(x)
         out_ref = ref_softsign(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -2854,7 +2854,7 @@ class TestSoftsignAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_softsign(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -2921,7 +2921,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_thresholded_relu(self.x_np, self.threshold)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -2931,7 +2931,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
         out2 = thresholded_relu(x)
         out_ref = ref_thresholded_relu(self.x_np, self.threshold)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -2942,7 +2942,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_thresholded_relu(self.x_np, self.threshold)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -3023,7 +3023,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_hardsigmoid(self.x_np)
         for r in res:
-            self.assertTrue(np.allclose(out_ref, r))
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -3033,7 +3033,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
         out2 = m(x)
         out_ref = ref_hardsigmoid(self.x_np)
         for r in [out1, out2]:
-            self.assertTrue(np.allclose(out_ref, r.numpy()))
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -3043,12 +3043,12 @@ class TestHardsigmoidAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
-        self.assertTrue(np.allclose(out_ref, res[0]))
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
         out = paddle.fluid.layers.hard_sigmoid(x)
-        self.assertTrue(np.allclose(out_ref, out.numpy()))
+        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_errors(self):
@@ -3115,7 +3115,7 @@ class TestSwishAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_swish(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -3125,7 +3125,7 @@ class TestSwishAPI(unittest.TestCase):
         out2 = swish(x)
         out_ref = ref_swish(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_dygraph_final_state_api(self):
@@ -3140,7 +3140,7 @@ class TestSwishAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_swish(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
@@ -3206,7 +3206,7 @@ class TestMishAPI(unittest.TestCase):
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = ref_mish(self.x_np)
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
@@ -3216,7 +3216,7 @@ class TestMishAPI(unittest.TestCase):
         out2 = mish(x)
         out_ref = ref_mish(self.x_np)
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
         paddle.enable_static()
 
     def test_fluid_api(self):
@@ -3227,7 +3227,7 @@ class TestMishAPI(unittest.TestCase):
         exe = fluid.Executor(self.place)
         res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_mish(self.x_np)
-        self.assertEqual(np.allclose(out_ref, res[0]), True)
+        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
 
     def test_errors(self):
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py
index 9c6a9412518..6d4e9c5eb97 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_sparse_op.py
@@ -88,7 +88,7 @@ class TestSparseSqrtOp(unittest.TestCase):
 
         # get and compare result
         result_array = np.array(out_selected_rows.get_tensor())
-        self.assertTrue(np.allclose(result_array, np.sqrt(np_array)))
+        np.testing.assert_allclose(result_array, np.sqrt(np_array), rtol=1e-05)
 
     def test_sparse_acti(self):
         places = [core.CPUPlace()]
diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py
index 428d0e7c210..1396d073f7c 100644
--- a/python/paddle/fluid/tests/unittests/test_adam_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adam_op.py
@@ -904,9 +904,9 @@ class TestAdamOptimizer(unittest.TestCase):
             preds.append(pred)
             losses.append(loss)
         for pred in preds:
-            self.assertTrue(np.allclose(pred, preds[0]))
+            np.testing.assert_allclose(pred, preds[0], rtol=1e-05)
         for loss in losses:
-            self.assertTrue(np.allclose(loss, losses[0]))
+            np.testing.assert_allclose(loss, losses[0], rtol=1e-05)
 
     def test_adam_api(self):
         # NOTE(zhiqiu): cpu and gpu has different seed, so should compare separatly.
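The multi-tensor optimizer tests below compare whole lists of parameter arrays. Since assert_allclose raises AssertionError on the first offending pair, a plain loop replaces the old assertEqual(..., True) wrapping; a sketch with hypothetical two-entry lists standing in for the fetched parameters:

    import numpy as np

    params1 = [np.ones(3), np.full(3, 2.0)]
    params2 = [np.ones(3), np.full(3, 2.0) * (1 + 1e-07)]

    for idx in range(len(params1)):
        # Raises AssertionError with a detailed per-element report if any
        # pair differs by more than the relative tolerance.
        np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)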
@@ -1177,13 +1177,11 @@ class TestMultiTensorAdam(unittest.TestCase):
             place=place, use_amp=use_amp, use_multi_tensor=True)
         output_dygraph2, params_dygraph2 = self._adam_optimize_dygraph(
             place=place, use_amp=use_amp, use_multi_tensor=False)
-        self.assertEqual(
-            np.allclose(output_dygraph1, output_dygraph2, rtol=1e-05), True)
+        np.testing.assert_allclose(output_dygraph1, output_dygraph2, rtol=1e-05)
         for idx in range(len(params_dygraph1)):
-            self.assertEqual(
-                np.allclose(params_dygraph1[idx],
-                            params_dygraph2[idx],
-                            rtol=1e-05), True)
+            np.testing.assert_allclose(params_dygraph1[idx],
+                                       params_dygraph2[idx],
+                                       rtol=1e-05)
         # test static mode
         output_static1 = self._adam_optimize_static(place=place,
                                                     use_amp=use_amp,
@@ -1192,10 +1190,9 @@ class TestMultiTensorAdam(unittest.TestCase):
                                                     use_amp=use_amp,
                                                     use_multi_tensor=False)
         for idx in range(len(output_static1)):
-            self.assertEqual(
-                np.allclose(output_static1[idx],
-                            output_static2[idx],
-                            rtol=1e-05), True)
+            np.testing.assert_allclose(output_static1[idx],
+                                       output_static2[idx],
+                                       rtol=1e-05)
 
     def _check_with_param_arrt(self, place, use_amp):
         output1, params1 = self._adam_optimize_dygraph(place=place,
@@ -1207,10 +1204,9 @@ class TestMultiTensorAdam(unittest.TestCase):
                                                        use_param_attr=True,
                                                        use_multi_tensor=False)
 
-        self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
+        np.testing.assert_allclose(output1, output2, rtol=1e-05)
         for idx in range(len(params1)):
-            self.assertEqual(
-                np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
 
     def _check_with_param_group(self, place, use_amp):
         output1, params1 = self._adam_optimize_dygraph(place=place,
@@ -1222,10 +1218,9 @@ class TestMultiTensorAdam(unittest.TestCase):
                                                        use_param_group=True,
                                                        use_multi_tensor=False)
 
-        self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True)
+        np.testing.assert_allclose(output1, output2, rtol=1e-05)
         for idx in range(len(params1)):
-            self.assertEqual(
-                np.allclose(params1[idx], params2[idx], rtol=1e-05), True)
+            np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
 
     def test_main(self):
         for place in self._get_places():
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py
index 204a16668ad..05a6d411881 100644
--- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py
@@ -91,17 +91,17 @@ class TestPool1D_API(unittest.TestCase):
                                            paddings=[0],
                                            adaptive=True)
 
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05)
 
             ada_max_pool1d_dg = paddle.nn.layer.AdaptiveAvgPool1D(
                 output_size=16)
             result = ada_max_pool1d_dg(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05)
 
             result = paddle.nn.functional.common.interpolate(input,
                                                              mode="area",
                                                              size=16)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05)
 
     def check_adaptive_avg_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
@@ -119,7 +119,7 @@ class TestPool1D_API(unittest.TestCase):
             fetches = exe.run(fluid.default_main_program(),
                               feed={"input": input_np},
                               fetch_list=[result])
-            self.assertTrue(np.allclose(fetches[0], result_np))
+            np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05)
 
     def test_adaptive_avg_pool1d(self):
         for place in self.places:
diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py
index db577ec5378..7b00bbf99e7 100644
--- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py
+++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py
@@ -82,12 +82,12 @@ class TestPool1D_API(unittest.TestCase):
                                            strides=[0],
                                            paddings=[0],
                                            adaptive=True)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05)
 
             ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1D(
                 output_size=16)
             result = ada_max_pool1d_dg(input)
-            self.assertTrue(np.allclose(result.numpy(), result_np))
+            np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05)
 
     def check_adaptive_max_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
@@ -105,7 +105,7 @@ class TestPool1D_API(unittest.TestCase):
             fetches = exe.run(fluid.default_main_program(),
                               feed={"input": input_np},
                               fetch_list=[result])
-            self.assertTrue(np.allclose(fetches[0], result_np))
+            np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05)
 
     def test_adaptive_max_pool1d(self):
         for place in self.places:
diff --git a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py
index 14c201d7606..29da50d7286 100644
--- a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py
+++ b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py
@@ -170,7 +170,9 @@ class TestAddPositionEncodingOpDygraph(unittest.TestCase):
         paddle.enable_static()
 
         position_tensor_np = add_position_encoding(tensor, 1.0, 1.0)
-        self.assertTrue(np.allclose(position_tensor, position_tensor_np))
+        np.testing.assert_allclose(position_tensor,
+                                   position_tensor_np,
+                                   rtol=1e-05)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_addmm_op.py b/python/paddle/fluid/tests/unittests/test_addmm_op.py
index da2e2335f7f..8d50ca929ef 100644
--- a/python/paddle/fluid/tests/unittests/test_addmm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_addmm_op.py
@@ -345,7 +345,9 @@ class TestAddMMAPI(unittest.TestCase):
         numpy_output = data_beta * data_input + data_alpha * np.dot(
             data_x, data_y)
 
-        self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), True)
+        np.testing.assert_allclose(numpy_output,
+                                   paddle_output.numpy(),
+                                   rtol=1e-05)
 
         paddle.enable_static()
 
@@ -369,7 +371,9 @@ class TestAddMMAPI(unittest.TestCase):
         numpy_output = data_beta * data_input + data_alpha * np.dot(
             data_x, data_y)
 
-        self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), True)
+        np.testing.assert_allclose(numpy_output,
+                                   paddle_output.numpy(),
+                                   rtol=1e-05)
 
         paddle.enable_static()
 
@@ -393,7 +397,9 @@ class TestAddMMAPI(unittest.TestCase):
         numpy_output = data_beta * data_input + data_alpha * np.dot(
             data_x, data_y)
 
-        self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), True)
+        np.testing.assert_allclose(numpy_output,
+                                   paddle_output.numpy(),
+                                   rtol=1e-05)
 
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_angle_op.py b/python/paddle/fluid/tests/unittests/test_angle_op.py
index 9848c8320f2..35f3b765e67 100644
--- a/python/paddle/fluid/tests/unittests/test_angle_op.py
+++ b/python/paddle/fluid/tests/unittests/test_angle_op.py
@@ -99,7 +99,7 @@ class TestAngleAPI(unittest.TestCase):
         with dygraph.guard():
            x = paddle.to_tensor(self.x)
            out_np = paddle.angle(x).numpy()
-        self.assertTrue(np.allclose(self.out, out_np))
+        np.testing.assert_allclose(self.out, out_np, rtol=1e-05)
 
     def test_static(self):
         mp, sp = static.Program(), static.Program()
@@ -110,7 +110,7 @@ class TestAngleAPI(unittest.TestCase):
         exe = static.Executor()
         exe.run(sp)
         [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out])
-        self.assertTrue(np.allclose(self.out, out_np))
+        np.testing.assert_allclose(self.out, out_np, rtol=1e-05)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py
index 50350e88795..2d4066e7dad 100644
--- a/python/paddle/fluid/tests/unittests/test_argsort_op.py
+++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py
@@ -146,8 +146,11 @@ class TestArgsortOpCPU(unittest.TestCase):
             py_outputs = self.py_argsort.forward()
             for pd_output, py_output in zip(pd_outputs, py_outputs):
                 self.assertEqual(pd_output.shape, py_output.shape)
-                self.assertTrue(
-                    np.allclose(pd_output, py_output, atol=0, equal_nan=False))
+                np.testing.assert_allclose(pd_output,
+                                           py_output,
+                                           rtol=1e-05,
+                                           atol=0,
+                                           equal_nan=False)
 
     def get_numerical_gradient(self, delta=1e-7):
         if self.dtype == 'float16':
diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py
index a8d630278f7..6596a08d48a 100644
--- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py
+++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py
@@ -24,7 +24,7 @@ from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import default_main_program
 from paddle.fluid import compiler, Program, program_guard
-import numpy
+import numpy as np
 
 
 def _test_read_write(x):
@@ -70,7 +70,7 @@ class TestArrayReadWrite(unittest.TestCase):
         for each_x in x:
             each_x.stop_gradient = False
 
-        tensor = numpy.random.random(size=(100, 100)).astype('float32')
+        tensor = np.random.random(size=(100, 100)).astype('float32')
         a_sum, x_sum = _test_read_write(x)
 
         place = core.CPUPlace()
@@ -100,7 +100,7 @@ class TestArrayReadWrite(unittest.TestCase):
                           },
                           fetch_list=g_vars)
         ]
-        g_out_sum = numpy.array(g_out).sum()
+        g_out_sum = np.array(g_out).sum()
 
         # since our final gradient is 1 and the neural network are all linear
         # with mean_op.
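One behavioural difference worth noting around the argsort conversion above: np.allclose defaults to equal_nan=False, while np.testing.assert_allclose defaults to equal_nan=True, which is why that test passes equal_nan=False explicitly to preserve the old semantics. A minimal illustration with throwaway arrays:

    import numpy as np

    a = np.array([1.0, np.nan])
    b = np.array([1.0, np.nan])

    # Default behaviour: NaNs in matching positions compare equal.
    np.testing.assert_allclose(a, b)

    # equal_nan=False restores np.allclose's default: NaN never matches.
    try:
        np.testing.assert_allclose(a, b, equal_nan=False)
    except AssertionError:
        pass  # the mismatch at the NaN position is reported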
@@ -125,7 +125,7 @@ class TestArrayReadWrite(unittest.TestCase):
             g_out_dygraph = [
                 item._grad_ivar().numpy().sum() for item in x_dygraph
             ]
-            g_out_sum_dygraph = numpy.array(g_out_dygraph).sum()
+            g_out_sum_dygraph = np.array(g_out_dygraph).sum()
 
             self.assertAlmostEqual(1.0, g_out_sum_dygraph, delta=0.1)
 
@@ -135,11 +135,11 @@ class TestArrayReadWriteOpError(unittest.TestCase):
     def _test_errors(self, use_fluid_api=True):
         if use_fluid_api:
             with program_guard(Program(), Program()):
-                x1 = numpy.random.randn(2, 4).astype('int32')
+                x1 = np.random.randn(2, 4).astype('int32')
                 x2 = fluid.layers.fill_constant(shape=[1],
                                                 dtype='int32',
                                                 value=1)
-                x3 = numpy.random.randn(2, 4).astype('int32')
+                x3 = np.random.randn(2, 4).astype('int32')
 
                 self.assertRaises(TypeError,
                                   fluid.layers.array_read,
@@ -152,9 +152,9 @@ class TestArrayReadWriteOpError(unittest.TestCase):
                                   out=x3)
         else:
             with program_guard(Program(), Program()):
-                x1 = numpy.random.randn(2, 4).astype('int32')
+                x1 = np.random.randn(2, 4).astype('int32')
                 x2 = paddle.ones(shape=[1], dtype='int32')
-                x3 = numpy.random.randn(2, 4).astype('int32')
+                x3 = np.random.randn(2, 4).astype('int32')
 
                 self.assertRaises(TypeError,
                                   paddle.tensor.array_read,
@@ -185,7 +185,7 @@ class TestArrayReadWriteApi(unittest.TestCase):
 
         item = paddle.tensor.array_read(arr, i)
 
-        self.assertTrue(numpy.allclose(x.numpy(), item.numpy()))
+        np.testing.assert_allclose(x.numpy(), item.numpy(), rtol=1e-05)
 
         paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index 116924544fc..fa902542f16 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -109,8 +109,8 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase):
         res = exe.run(main_program,
                       feed={'x': feed_x},
                       fetch_list=[sums.name, x.grad_name])
-        self.assertTrue(np.allclose(res[0], feed_add))
-        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
+        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
         paddle.disable_static()
 
 
@@ -158,8 +158,8 @@ class TestAssignOApi(unittest.TestCase):
         res = exe.run(main_program,
                       feed={'x': feed_x},
                       fetch_list=[sums.name, x.grad_name])
-        self.assertTrue(np.allclose(res[0], feed_add))
-        self.assertTrue(np.allclose(res[1], ones / 1000.0))
+        np.testing.assert_allclose(res[0], feed_add, rtol=1e-05)
+        np.testing.assert_allclose(res[1], ones / 1000.0, rtol=1e-05)
         paddle.disable_static()
 
     def test_assign_NumpyArray(self):
@@ -167,41 +167,41 @@ class TestAssignOApi(unittest.TestCase):
             array = np.random.random(size=(100, 10)).astype(np.bool_)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
-        self.assertTrue(np.allclose(result1.numpy(), array))
+        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_NumpyArray1(self):
         with fluid.dygraph.guard():
             array = np.random.random(size=(100, 10)).astype(np.float32)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
-        self.assertTrue(np.allclose(result1.numpy(), array))
+        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_NumpyArray2(self):
         with fluid.dygraph.guard():
             array = np.random.random(size=(100, 10)).astype(np.int32)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
-        self.assertTrue(np.allclose(result1.numpy(), array))
+        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_NumpyArray3(self):
         with fluid.dygraph.guard():
             array = np.random.random(size=(100, 10)).astype(np.int64)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
-        self.assertTrue(np.allclose(result1.numpy(), array))
+        np.testing.assert_allclose(result1.numpy(), array, rtol=1e-05)
 
     def test_assign_List(self):
         l = [1, 2, 3]
         result = paddle.assign(l)
-        self.assertTrue(np.allclose(result.numpy(), np.array(l)))
+        np.testing.assert_allclose(result.numpy(), np.array(l), rtol=1e-05)
 
     def test_assign_BasicTypes(self):
         result1 = paddle.assign(2)
         result2 = paddle.assign(3.0)
         result3 = paddle.assign(True)
-        self.assertTrue(np.allclose(result1.numpy(), np.array([2])))
-        self.assertTrue(np.allclose(result2.numpy(), np.array([3.0])))
-        self.assertTrue(np.allclose(result3.numpy(), np.array([1])))
+        np.testing.assert_allclose(result1.numpy(), np.array([2]), rtol=1e-05)
+        np.testing.assert_allclose(result2.numpy(), np.array([3.0]), rtol=1e-05)
+        np.testing.assert_allclose(result3.numpy(), np.array([1]), rtol=1e-05)
 
     def test_clone(self):
         fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
index 6c6c26a8c6a..e64f1c21370 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
@@ -88,10 +88,7 @@ class TestAssignApi(unittest.TestCase):
         exe = fluid.Executor(self.place)
         [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
 
-        np.testing.assert_array_equal(fetched_x,
-                                      self.value,
-                                      err_msg='fetch_x=%s val=%s' %
-                                      (fetched_x, self.value))
+        np.testing.assert_array_equal(fetched_x, self.value)
         self.assertEqual(fetched_x.dtype, self.value.dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_atan2_op.py b/python/paddle/fluid/tests/unittests/test_atan2_op.py
index 90e2a37453f..e9241383984 100644
--- a/python/paddle/fluid/tests/unittests/test_atan2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_atan2_op.py
@@ -116,7 +116,7 @@ class TestAtan2API(unittest.TestCase):
                 res = exe.run(feed={'X1': self.x1, 'X2': self.x2})
             out_ref = np.arctan2(self.x1, self.x2)
             for r in res:
-                self.assertEqual(np.allclose(out_ref, r), True)
+                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
 
         for place in self.place:
             run(place)
@@ -129,7 +129,7 @@ class TestAtan2API(unittest.TestCase):
             X2 = paddle.to_tensor(self.x2)
             out = paddle.atan2(X1, X2)
             out_ref = np.arctan2(self.x1, self.x2)
-            self.assertEqual(np.allclose(out_ref, out.numpy()), True)
+            np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
             paddle.enable_static()
 
         for place in self.place:
diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py
index bf21d29caa0..993bad83630 100644
--- a/python/paddle/fluid/tests/unittests/test_base_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_base_layer.py
@@ -75,7 +75,9 @@ class TestBaseLayer(unittest.TestCase):
             for name, _ in l.named_parameters(prefix='l1'):
                 self.assertEqual(name, expected_names[idx])
                 idx += 1
-            self.assertTrue(np.allclose(ret.numpy(), 0.2 * np.ones([2, 2])))
+            np.testing.assert_allclose(ret.numpy(),
+                                       0.2 * np.ones([2, 2]),
+                                       rtol=1e-05)
 
     def test_one_level(self):
         with _test_eager_guard():
@@ -100,7 +102,9 @@ class TestBaseLayer(unittest.TestCase):
                 self.assertEqual(name, expected_names[idx])
                 idx += 1
             ret = l()
-            self.assertTrue(np.allclose(ret.numpy(), 0.8 * np.ones([2, 2])))
+            np.testing.assert_allclose(ret.numpy(),
+                                       0.8 * np.ones([2, 2]),
+                                       rtol=1e-05)
 
     def test_three_level(self):
         with _test_eager_guard():
@@ -442,8 +446,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
 
@@ -452,8 +457,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
         for p in self.linear.parameters():
@@ -511,8 +517,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
 
@@ -521,8 +528,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
         for p in self.linear.parameters():
@@ -538,8 +546,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
 
@@ -548,8 +557,9 @@ class TestLayerTo(unittest.TestCase):
                          paddle.fluid.core.VarDesc.VarType.FP64)
         self.assertEqual(self.linear.buf_name.dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
-        self.assertTrue(
-            np.allclose(self.linear.weight.grad.numpy(), self.new_grad))
+        np.testing.assert_allclose(self.linear.weight.grad.numpy(),
+                                   self.new_grad,
+                                   rtol=1e-05)
         self.assertEqual(self.linear.weight._grad_ivar().dtype,
                          paddle.fluid.core.VarDesc.VarType.FP64)
         for p in self.linear.parameters():
diff --git a/python/paddle/fluid/tests/unittests/test_basic_gru_api.py b/python/paddle/fluid/tests/unittests/test_basic_gru_api.py
index 2a06f192777..041580e2ec4 100644
--- a/python/paddle/fluid/tests/unittests/test_basic_gru_api.py
+++ b/python/paddle/fluid/tests/unittests/test_basic_gru_api.py
@@ -330,10 +330,12 @@ class TestBasicGRUApi(unittest.TestCase):
                              is_bidirect=self.is_bidirect,
                              sequence_length=sequence_length_np)
 
-        self.assertTrue(np.allclose(api_rnn_out, np_out[0], rtol=1e-4, atol=0))
+        np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001,
atol=0) - self.assertTrue( - np.allclose(api_last_hidden, np_out[1], rtol=1e-4, atol=0)) + np.testing.assert_allclose(api_last_hidden, + np_out[1], + rtol=0.0001, + atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py b/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py index 7c4c8ff5aee..d02fe95383c 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py @@ -143,7 +143,7 @@ class TestBasicGRUUnit(unittest.TestCase): np_out = step(step_input_np, pre_hidden_np, gate_w, gate_b, candidate_w, candidate_b) - self.assertTrue(np.allclose(api_out, np_out, rtol=1e-4, atol=0)) + np.testing.assert_allclose(api_out, np_out, rtol=0.0001, atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py b/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py index abe0d6f8d56..4c7e24b91a9 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py +++ b/python/paddle/fluid/tests/unittests/test_basic_lstm_api.py @@ -298,11 +298,15 @@ class TestBasicLSTMApi(unittest.TestCase): is_bidirect=self.is_bidirect, sequence_length=sequence_length_np) - self.assertTrue(np.allclose(api_rnn_out, np_out[0], rtol=1e-4, atol=0)) - self.assertTrue( - np.allclose(api_last_hidden, np_out[1], rtol=1e-4, atol=0)) - self.assertTrue(np.allclose(api_last_cell, np_out[2], rtol=1e-4, - atol=0)) + np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001, atol=0) + np.testing.assert_allclose(api_last_hidden, + np_out[1], + rtol=0.0001, + atol=0) + np.testing.assert_allclose(api_last_cell, + np_out[2], + rtol=0.0001, + atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py b/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py index 9f76d7d736f..a0596e726ad 100644 --- a/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_basic_lstm_unit_op.py @@ -126,10 +126,14 @@ class TestBasicGRUUnit(unittest.TestCase): np_hidden_out, np_cell_out = step(step_input_np, pre_hidden_np, pre_cell_np, gate_w, gate_b) - self.assertTrue( - np.allclose(api_hidden_out, np_hidden_out, rtol=1e-4, atol=0)) - self.assertTrue( - np.allclose(api_cell_out, np_cell_out, rtol=1e-4, atol=0)) + np.testing.assert_allclose(api_hidden_out, + np_hidden_out, + rtol=0.0001, + atol=0) + np.testing.assert_allclose(api_cell_out, + np_cell_out, + rtol=0.0001, + atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index b312baea932..c4cf260d396 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -216,7 +216,11 @@ class TestBatchNormOpInference(unittest.TestCase): self.init_kernel_type() def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.testing.assert_allclose(np.array(tensor), + np_array, + rtol=1e-05, + atol=atol, + err_msg=msg) def check_with_place(self, place, data_layout, dtype, shape): epsilon = 0.00001 @@ -688,7 +692,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute(x, False, False) y2 = compute(x, True, True) - self.assertTrue(np.allclose(y1, y2)) + 
np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_static(self): places = [fluid.CPUPlace()] @@ -713,7 +717,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute(x, False, False) y2 = compute(x, True, True) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index 7c569b70031..7b60c31191d 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -112,16 +112,16 @@ class TestBatchNorm(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1, g1 = compute_baseline(x) y2, g2 = compute_1d(x) - self.assertTrue(np.allclose(g1, g2)) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(g1, g2, rtol=1e-05) + np.testing.assert_allclose(y1, y2, rtol=1e-05) # [N, C, L] shape = [1000000, 4, 4] x = np.random.randn(*shape).astype("float32") y1, g1 = compute_baseline(x) y2, g2 = compute_1d(x) - self.assertTrue(np.allclose(g1, g2)) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(g1, g2, rtol=1e-05) + np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_eager_api(self): places = [fluid.CPUPlace()] @@ -154,8 +154,8 @@ class TestBatchNorm(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1, g1 = compute_v1(x) y2, g2 = compute_v2(x) - self.assertTrue(np.allclose(g1, g2)) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(g1, g2, rtol=1e-05) + np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_dygraph(self): places = [fluid.CPUPlace()] @@ -212,8 +212,8 @@ class TestBatchNorm(unittest.TestCase): y2 = compute_v2(x) y3 = compute_v3(x, False, False) y4 = compute_v4(x) - self.assertTrue(np.allclose(y1, y2)) - self.assertTrue(np.allclose(y3, y4)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) + np.testing.assert_allclose(y3, y4, rtol=1e-05) def test_static(self): places = [fluid.CPUPlace()] @@ -247,7 +247,7 @@ class TestBatchNorm(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute_v1(x, False, False) y2 = compute_v2(x) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) class TestBatchNormChannelLast(unittest.TestCase): @@ -280,10 +280,14 @@ class TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - self.assertEqual( - np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05, + atol=1e-07) else: - self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05) def test_2d(self): for p in self.places: @@ -299,10 +303,14 @@ class TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 3, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - self.assertEqual( - np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05, + atol=1e-07) else: - self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05) def test_3d(self): for p in self.places: @@ -318,10 +326,14 @@ class 
TestBatchNormChannelLast(unittest.TestCase): y2 = paddle.transpose(y2, [0, 2, 3, 4, 1]) if core.is_compiled_with_rocm(): # HIP will fail if no atol - self.assertEqual( - np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05, + atol=1e-07) else: - self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05) def test_1d_opt(self): with fluid.dygraph.guard(): @@ -385,7 +397,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): net2.training = False y1 = net1(x) y2 = net2(x) - self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True) + np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-05) class TestBatchNormUseGlobalStatsCase1(TestBatchNormUseGlobalStats): diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index e4fe6580ea1..8d47b7f63cc 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -60,12 +60,16 @@ class BeamSearchOpTester(unittest.TestCase): selected_ids = self.scope.find_var("selected_ids").get_tensor() selected_scores = self.scope.find_var("selected_scores").get_tensor() parent_idx = self.scope.find_var("parent_idx").get_tensor() - self.assertTrue(np.allclose(np.array(selected_ids), self.output_ids)) - self.assertTrue( - np.allclose(np.array(selected_scores), self.output_scores)) + np.testing.assert_allclose(np.array(selected_ids), + self.output_ids, + rtol=1e-05) + np.testing.assert_allclose(np.array(selected_scores), + self.output_scores, + rtol=1e-05) self.assertEqual(selected_ids.lod(), self.output_lod) - self.assertTrue( - np.allclose(np.array(parent_idx), self.output_parent_idx)) + np.testing.assert_allclose(np.array(parent_idx), + self.output_parent_idx, + rtol=1e-05) def _create_pre_ids(self): np_data = np.array([[1, 2, 3, 4]], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py index 8ab6968eb21..135b559c99b 100644 --- a/python/paddle/fluid/tests/unittests/test_bernoulli_op.py +++ b/python/paddle/fluid/tests/unittests/test_bernoulli_op.py @@ -42,8 +42,7 @@ class TestBernoulliOp(OpTest): def verify_output(self, outs): hist, prob = output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestBernoulliApi(unittest.TestCase): @@ -54,8 +53,7 @@ class TestBernoulliApi(unittest.TestCase): out = paddle.bernoulli(x) paddle.enable_static() hist, prob = output_hist(out.numpy()) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) def test_static(self): x = paddle.rand([1024, 1024]) @@ -64,8 +62,7 @@ class TestBernoulliApi(unittest.TestCase): out = exe.run(paddle.static.default_main_program(), fetch_list=[out.name]) hist, prob = output_hist(out[0]) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestRandomValue(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_bfgs.py b/python/paddle/fluid/tests/unittests/test_bfgs.py index 08ec4a23806..f7023fba86b 100644 --- a/python/paddle/fluid/tests/unittests/test_bfgs.py +++ 
b/python/paddle/fluid/tests/unittests/test_bfgs.py @@ -89,10 +89,16 @@ class TestBfgs(unittest.TestCase): x0 = np.random.random(size=[dimension]).astype('float32') results = test_static_graph(func=func, x0=x0) - self.assertTrue(np.allclose(minimum, results[2])) + np.testing.assert_allclose(minimum, + results[2], + rtol=1e-05, + atol=1e-8) results = test_dynamic_graph(func=func, x0=x0) - self.assertTrue(np.allclose(minimum, results[2].numpy())) + np.testing.assert_allclose(minimum, + results[2].numpy(), + rtol=1e-05, + atol=1e-8) def test_inf_minima(self): extream_point = np.array([-1, 2]).astype('float32') @@ -120,7 +126,7 @@ class TestBfgs(unittest.TestCase): x0 = np.array([0.82], dtype='float64') results = test_static_graph(func, x0, dtype='float64') - self.assertTrue(np.allclose(0.8, results[2])) + np.testing.assert_allclose(0.8, results[2], rtol=1e-05, atol=1e-8) def func_rosenbrock(self): # The Rosenbrock function is a standard optimization test case. @@ -139,7 +145,7 @@ class TestBfgs(unittest.TestCase): x0 = np.random.random(size=[2]).astype('float32') results = test_dynamic_graph(func, x0) - self.assertTrue(np.allclose(minimum, results[2])) + np.testing.assert_allclose(minimum, results[2], rtol=1e-05, atol=1e-8) def test_rosenbrock(self): with _test_eager_guard(): @@ -156,7 +162,10 @@ class TestBfgs(unittest.TestCase): # test initial_inverse_hessian_estimate is good results = test_static_graph_H0(func, x0, H0, dtype='float32') - self.assertTrue(np.allclose([0., 0.], results[2])) + np.testing.assert_allclose([0.0, 0.0], + results[2], + rtol=1e-05, + atol=1e-8) self.assertTrue(results[0][0]) # test initial_inverse_hessian_estimate is bad diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py index 281d6811c62..612d79e2b3a 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py @@ -351,7 +351,7 @@ class TestBicubicInterpOpAPI(unittest.TestCase): out_w=12, align_corners=False) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) @@ -364,7 +364,7 @@ class TestBicubicInterpOpAPI(unittest.TestCase): out_h=12, out_w=12, align_corners=False) - self.assertTrue(np.allclose(dy_result, expect)) + np.testing.assert_allclose(dy_result, expect, rtol=1e-05) class TestBicubicOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index c27c1aa0f9e..6dc98456584 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -430,7 +430,7 @@ class TestBicubicInterpOpAPI(unittest.TestCase): out_w=12, align_corners=False) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(x_data) @@ -443,7 +443,7 @@ class TestBicubicInterpOpAPI(unittest.TestCase): out_h=12, out_w=12, align_corners=False) - self.assertTrue(np.allclose(dy_result, expect)) + np.testing.assert_allclose(dy_result, expect, rtol=1e-05) class TestBicubicOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py 
b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index fa80b8ac0f8..e3a72f8240c 100755 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -577,7 +577,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase): out_w=12, align_corners=True) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py index 788bd0fc411..82acee3337f 100755 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_v2_op.py @@ -635,7 +635,7 @@ class TestBilinearInterpOpAPI(unittest.TestCase): out_w=12, align_corners=True) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy(unittest.TestCase): @@ -657,7 +657,7 @@ class TestBilinearInterpOpAPI_dy(unittest.TestCase): size=[12, 12], mode="bilinear", align_corners=False) - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy2(unittest.TestCase): @@ -681,7 +681,7 @@ class TestBilinearInterpOpAPI_dy2(unittest.TestCase): size=size, mode="bilinear", align_corners=False) - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy3(unittest.TestCase): @@ -705,7 +705,7 @@ class TestBilinearInterpOpAPI_dy3(unittest.TestCase): size=[size, size], mode="bilinear", align_corners=False) - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestBilinearInterpOpAPI_dy4(unittest.TestCase): @@ -729,7 +729,7 @@ class TestBilinearInterpOpAPI_dy4(unittest.TestCase): scale_factor=scale, mode="bilinear", align_corners=False) - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_bmm_op.py b/python/paddle/fluid/tests/unittests/test_bmm_op.py index 5e5c41ae882..1019e594157 100644 --- a/python/paddle/fluid/tests/unittests/test_bmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_bmm_op.py @@ -62,7 +62,7 @@ class API_TestBmm(unittest.TestCase): }, fetch_list=[result_bmm]) expected_result = np.matmul(input1, input2) - self.assertTrue(np.allclose(expected_result, result)) + np.testing.assert_allclose(expected_result, result, rtol=1e-05) class API_TestDygraphBmm(unittest.TestCase): @@ -78,7 +78,7 @@ class API_TestDygraphBmm(unittest.TestCase): out = paddle.bmm(x, y) out_np = out.numpy() expected_result = np.matmul(input1, input2) - self.assertTrue(np.allclose(expected_result, out_np)) + np.testing.assert_allclose(expected_result, out_np, rtol=1e-05) class TestBmmAPIError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 4d18d0a2a1f..18959d80b0e 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -277,9 +277,9 @@ class TestBoxCoderOpWithVarianceDygraphAPI(unittest.TestCase): 
"decode_center_size", self.box_normalized, axis=self.axis) - self.assertEqual( - np.allclose(np.sum(self.output_ref), - np.sum(output_box.numpy())), True) + np.testing.assert_allclose(np.sum(self.output_ref), + np.sum(output_box.numpy()), + rtol=1e-05) paddle.enable_static() for place in self.place: diff --git a/python/paddle/fluid/tests/unittests/test_bucketize_api.py b/python/paddle/fluid/tests/unittests/test_bucketize_api.py index 569de1574ba..88816342d94 100644 --- a/python/paddle/fluid/tests/unittests/test_bucketize_api.py +++ b/python/paddle/fluid/tests/unittests/test_bucketize_api.py @@ -57,8 +57,8 @@ class TestBucketizeAPI(unittest.TestCase): out_ref1 = np.searchsorted(self.sorted_sequence, self.x, side='right') - self.assertTrue(np.allclose(out_ref, res[0])) - self.assertTrue(np.allclose(out_ref1, res[1])) + np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) + np.testing.assert_allclose(out_ref1, res[1], rtol=1e-05) for place in self.place: run(place) @@ -75,8 +75,8 @@ class TestBucketizeAPI(unittest.TestCase): out_ref2 = np.searchsorted(self.sorted_sequence, self.x, side='right') - self.assertEqual(np.allclose(out_ref1, out1.numpy()), True) - self.assertEqual(np.allclose(out_ref2, out2.numpy()), True) + np.testing.assert_allclose(out_ref1, out1.numpy(), rtol=1e-05) + np.testing.assert_allclose(out_ref2, out2.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index 79bb1e0bffd..290ab273c99 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -73,11 +73,11 @@ class TestAPICase(unittest.TestCase): res = exe.run(main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4]) - self.assertTrue(np.allclose(res[0], 1)) - self.assertTrue(np.allclose(res[1], 2)) - self.assertTrue(np.allclose(res[2], 3)) - self.assertTrue(np.allclose(res[3], 2)) - self.assertTrue(np.allclose(res[4], 2)) + np.testing.assert_allclose(res[0], 1, rtol=1e-05) + np.testing.assert_allclose(res[1], 2, rtol=1e-05) + np.testing.assert_allclose(res[2], 3, rtol=1e-05) + np.testing.assert_allclose(res[3], 2, rtol=1e-05) + np.testing.assert_allclose(res[4], 2, rtol=1e-05) def test_return_var_tuple(self): @@ -119,10 +119,12 @@ class TestAPICase(unittest.TestCase): exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=out) - self.assertTrue( - np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32))) - self.assertTrue( - np.allclose(np.asarray(ret[1]), np.full((2, 3), 2, np.float32))) + np.testing.assert_allclose(np.asarray(ret[0]), + np.full((1, 2), 1, np.int32), + rtol=1e-05) + np.testing.assert_allclose(np.asarray(ret[1]), + np.full((2, 3), 2, np.float32), + rtol=1e-05) class TestAPICase_Nested(unittest.TestCase): @@ -188,9 +190,9 @@ class TestAPICase_Nested(unittest.TestCase): res = exe.run(main_program, fetch_list=[out_1, out_2, out_3]) - self.assertTrue(np.allclose(res[0], 1)) - self.assertTrue(np.allclose(res[1], 2)) - self.assertTrue(np.allclose(res[2], 3)) + np.testing.assert_allclose(res[0], 1, rtol=1e-05) + np.testing.assert_allclose(res[1], 2, rtol=1e-05) + np.testing.assert_allclose(res[2], 3, rtol=1e-05) class TestAPICase_Error(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py index eaccb2193dc..50df6bbc024 100644 --- a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py +++ 
b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py @@ -173,11 +173,13 @@ class TestChannelShuffleAPI(unittest.TestCase): data_format=data_format) result = channel_shuffle(paddle.to_tensor(x)) - self.assertTrue(np.allclose(result.numpy(), npresult)) + np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) result_functional = F.channel_shuffle(paddle.to_tensor(x), 3, data_format) - self.assertTrue(np.allclose(result_functional.numpy(), npresult)) + np.testing.assert_allclose(result_functional.numpy(), + npresult, + rtol=1e-05) channel_shuffle_str = 'groups={}'.format(groups) if data_format != 'NCHW': diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py index d03cfed9697..4c0285e5178 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py @@ -188,7 +188,7 @@ class TestCholeskySolveAPI(unittest.TestCase): "y": umat }, fetch_list=[z]) - self.assertTrue(np.allclose(fetches[0], z_np)) + np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05) #test in static mode def test_static(self): @@ -208,7 +208,7 @@ class TestCholeskySolveAPI(unittest.TestCase): y = paddle.to_tensor(y_np) z = paddle.linalg.cholesky_solve(x, y, upper=self.upper) - self.assertTrue(np.allclose(z_np, z.numpy())) + np.testing.assert_allclose(z_np, z.numpy(), rtol=1e-05) self.assertEqual(z_np.shape, z.numpy().shape) paddle.enable_static() @@ -230,7 +230,7 @@ class TestCholeskySolveAPI(unittest.TestCase): y = paddle.to_tensor(y_np) z = paddle.linalg.cholesky_solve(x, y, upper=self.upper) self.assertEqual(z_sci.shape, z.numpy().shape) - self.assertTrue(np.allclose(z_sci, z.numpy())) + np.testing.assert_allclose(z_sci, z.numpy(), rtol=1e-05) for idx, place in enumerate(self.place): run(place) diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py index d7362430f1a..c50935908d1 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py @@ -72,9 +72,9 @@ class API_TestChunk(unittest.TestCase): }, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) - self.assertTrue(np.allclose(ex_x2, r2)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) + np.testing.assert_allclose(ex_x2, r2, rtol=1e-05) class API_TestChunk1(unittest.TestCase): @@ -89,9 +89,9 @@ class API_TestChunk1(unittest.TestCase): r0, r1, r2, = exe.run(feed={"data1": input1}, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) - self.assertTrue(np.allclose(ex_x2, r2)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) + np.testing.assert_allclose(ex_x2, r2, rtol=1e-05) class API_TestDygraphChunk(unittest.TestCase): @@ -106,9 +106,9 @@ class API_TestDygraphChunk(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + 
np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out2(self): with fluid.dygraph.guard(): @@ -120,9 +120,9 @@ class API_TestDygraphChunk(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_axis_tensor_input(self): with fluid.dygraph.guard(): @@ -135,9 +135,9 @@ class API_TestDygraphChunk(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py index 04b9c5b8b8b..306745f7bec 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py @@ -148,11 +148,11 @@ class TestClipByNormOpWithSelectedRows(unittest.TestCase): output = self.max_norm * y_np / norm else: output = y_np - self.assertTrue( - np.allclose(np.array(out_tensor), - output, - atol=1e-5, - equal_nan=False)) + np.testing.assert_allclose(np.array(out_tensor), + output, + rtol=1e-05, + atol=1e-05, + equal_nan=False) def test_clip_by_norm_with_selected_ros(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py index 61ff4a63bef..626ceb04a4f 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_op.py @@ -196,21 +196,23 @@ class TestClipAPI(unittest.TestCase): out_10, out_11 ]) - self.assertTrue(np.allclose(res1, data.clip(0.2, 0.8))) - self.assertTrue(np.allclose(res2, data.clip(0.2, 0.9))) - self.assertTrue(np.allclose(res3, data.clip(min=0.3))) - self.assertTrue(np.allclose(res4, data.clip(max=0.7))) - self.assertTrue(np.allclose(res5, data.clip(min=0.2))) - self.assertTrue(np.allclose(res6, data.clip(max=0.8))) - self.assertTrue(np.allclose(res7, data.clip(max=-1))) - self.assertTrue(np.allclose(res8, data)) - self.assertTrue( - np.allclose(res9, - data.astype(np.float64).clip(0.2, 0.9))) - self.assertTrue( - np.allclose(res10, (data * 10).astype(np.int32).clip(2, 8))) - self.assertTrue( - np.allclose(res11, (data * 10).astype(np.int64).clip(2, 8))) + np.testing.assert_allclose(res1, data.clip(0.2, 0.8), rtol=1e-05) + np.testing.assert_allclose(res2, data.clip(0.2, 0.9), rtol=1e-05) + np.testing.assert_allclose(res3, data.clip(min=0.3), rtol=1e-05) + np.testing.assert_allclose(res4, data.clip(max=0.7), rtol=1e-05) + np.testing.assert_allclose(res5, data.clip(min=0.2), rtol=1e-05) + np.testing.assert_allclose(res6, data.clip(max=0.8), rtol=1e-05) + np.testing.assert_allclose(res7, data.clip(max=-1), rtol=1e-05) + np.testing.assert_allclose(res8, data, rtol=1e-05) + np.testing.assert_allclose(res9, + data.astype(np.float64).clip(0.2, 0.9), + rtol=1e-05) + np.testing.assert_allclose(res10, + (data * 
10).astype(np.int32).clip(2, 8), + rtol=1e-05) + np.testing.assert_allclose(res11, + (data * 10).astype(np.int64).clip(2, 8), + rtol=1e-05) paddle.disable_static() def func_clip_dygraph(self): @@ -239,14 +241,24 @@ class TestClipAPI(unittest.TestCase): # test with numpy.generic out_6 = self._executed_api(images, min=np.abs(0.2), max=np.abs(0.8)) - self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8))) - self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9))) - self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8))) - self.assertTrue( - np.allclose(out_4.numpy(), (data * 10).astype(np.int32).clip(2, 8))) - self.assertTrue( - np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8))) - self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8))) + np.testing.assert_allclose(out_1.numpy(), + data.clip(0.2, 0.8), + rtol=1e-05) + np.testing.assert_allclose(out_2.numpy(), + data.clip(0.2, 0.9), + rtol=1e-05) + np.testing.assert_allclose(out_3.numpy(), + data.clip(0.2, 0.8), + rtol=1e-05) + np.testing.assert_allclose(out_4.numpy(), + (data * 10).astype(np.int32).clip(2, 8), + rtol=1e-05) + np.testing.assert_allclose(out_5.numpy(), + (data * 10).astype(np.int64).clip(2, 8), + rtol=1e-05) + np.testing.assert_allclose(out_6.numpy(), + data.clip(0.2, 0.8), + rtol=1e-05) def test_clip_dygraph(self): with _test_eager_guard(): @@ -268,9 +280,9 @@ class TestClipAPI(unittest.TestCase): out1 = paddle.clip(x_int32, min=1) out2 = paddle.clip(x_int64, min=1) out3 = paddle.clip(x_f32, min=1) - self.assertTrue(np.allclose(out1.numpy(), egr_out1.numpy())) - self.assertTrue(np.allclose(out2.numpy(), egr_out2.numpy())) - self.assertTrue(np.allclose(out3.numpy(), egr_out3.numpy())) + np.testing.assert_allclose(out1.numpy(), egr_out1.numpy(), rtol=1e-05) + np.testing.assert_allclose(out2.numpy(), egr_out2.numpy(), rtol=1e-05) + np.testing.assert_allclose(out3.numpy(), egr_out3.numpy(), rtol=1e-05) def test_errors(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_collective_api_base.py b/python/paddle/fluid/tests/unittests/test_collective_api_base.py index e52da771b89..4131239adf7 100644 --- a/python/paddle/fluid/tests/unittests/test_collective_api_base.py +++ b/python/paddle/fluid/tests/unittests/test_collective_api_base.py @@ -316,31 +316,35 @@ class TestDistBase(unittest.TestCase): need_result = np.vstack((input1, input2)) tr_out0 = np.vstack((tr0_out[0], tr0_out[1])) tr_out1 = np.vstack((tr1_out[0], tr1_out[1])) - self.assertTrue(np.allclose(tr_out0, need_result)) - self.assertTrue(np.allclose(tr_out1, need_result)) + np.testing.assert_allclose(tr_out0, need_result, rtol=1e-05) + np.testing.assert_allclose(tr_out1, need_result, rtol=1e-05) if col_type == "allgather_object": need_result = [input1, input2] self.assertEqual(need_result, tr0_out) self.assertEqual(need_result, tr1_out) elif col_type == "broadcast": need_result = input2 - self.assertTrue(np.allclose(tr0_out, need_result)) - self.assertTrue(np.allclose(tr1_out, need_result)) + np.testing.assert_allclose(tr0_out[0], need_result, rtol=1e-05) + np.testing.assert_allclose(tr1_out[0], need_result, rtol=1e-05) elif col_type == "reduce": need_result = input1 + input2 - self.assertTrue(np.allclose(tr0_out, need_result)) + np.testing.assert_allclose(tr0_out[0], need_result, rtol=1e-05) elif col_type == "scatter": need_result = input2 need_result1 = need_result[0:need_result.shape[0] // 2] need_result2 = need_result[need_result.shape[0] // 2:] - self.assertTrue(np.allclose(tr0_out, 
need_result1)) - self.assertTrue(np.allclose(tr1_out, need_result2)) + np.testing.assert_allclose(tr0_out[0], need_result1, rtol=1e-05) + np.testing.assert_allclose(tr1_out[0], need_result2, rtol=1e-05) elif col_type == "allreduce": need_result = input1 + input2 - self.assertTrue( - np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + need_result, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "parallel_embedding": result_data = tr0_out[0] np.random.seed(2020) @@ -356,15 +360,19 @@ class TestDistBase(unittest.TestCase): np.random.seed(2020) weight = np.random.rand(1000, 16) need_result = np.matmul(input1, weight) - self.assertTrue( - np.allclose(result_data, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(result_data, + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "column_parallel_linear": result_data = tr0_out[0] np.random.seed(2020) weight = np.random.rand(1000, 16) need_result = np.matmul(input1, weight) - self.assertTrue( - np.allclose(result_data, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(result_data, + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "alltoall": need_result1 = np.vstack((input1[0:input1.shape[0] // 2, :], input2[0:input2.shape[0] // 2, :])) @@ -372,14 +380,20 @@ class TestDistBase(unittest.TestCase): input2[input2.shape[0] // 2:, :])) tr0_out = np.vstack(tr0_out) tr1_out = np.vstack(tr1_out) - self.assertTrue( - np.allclose(tr0_out, need_result1, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result2, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out, + need_result1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out, + need_result2, + rtol=1e-05, + atol=1e-05) elif col_type == "sendrecv": result_data = tr1_out[0] - self.assertTrue( - np.allclose(input1, result_data, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(input1, + result_data, + rtol=1e-05, + atol=1e-05) elif col_type == "global_gather": in_feat = 2 n_expert = 2 @@ -470,21 +484,23 @@ class TestDistBase(unittest.TestCase): if tr1_out[0] is None or tr1_out[0].shape[0] == 0: tr1_out[0] = np.array([]) - self.assertTrue( - np.allclose(tr0_out[0], output1, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out[0], output2, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + output1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + output2, + rtol=1e-05, + atol=1e-05) if static_mode == 0: - self.assertTrue( - np.allclose(tr0_out[1], - 2 * local_input_buf1, - rtol=1e-05, - atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out[1], - 2 * local_input_buf2, - rtol=1e-05, - atol=1e-05)) + np.testing.assert_allclose(tr0_out[1], + 2 * local_input_buf1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[1], + 2 * local_input_buf2, + rtol=1e-05, + atol=1e-05) elif col_type == "global_scatter": np.random.seed(pid0) @@ -537,20 +553,22 @@ class TestDistBase(unittest.TestCase): if tr1_out[0] is None or tr1_out[0].shape[0] == 0: tr1_out[0] = np.array([]) - self.assertTrue( - np.allclose(tr0_out[0], output1, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out[0], output2, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + output1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + output2, 
+ rtol=1e-05, + atol=1e-05) if static_mode == 0: - self.assertTrue( - np.allclose(tr0_out[1], - 2 * local_input_buf1, - rtol=1e-05, - atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out[1], - 2 * local_input_buf2, - rtol=1e-05, - atol=1e-05)) + np.testing.assert_allclose(tr0_out[1], + 2 * local_input_buf1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[1], + 2 * local_input_buf2, + rtol=1e-05, + atol=1e-05) else: pass diff --git a/python/paddle/fluid/tests/unittests/test_collective_base.py b/python/paddle/fluid/tests/unittests/test_collective_base.py index 6191b428900..4ed79de8621 100644 --- a/python/paddle/fluid/tests/unittests/test_collective_base.py +++ b/python/paddle/fluid/tests/unittests/test_collective_base.py @@ -241,44 +241,54 @@ class TestDistBase(unittest.TestCase): input2 = np.random.random((10, 1000)) if col_type == "allgather": need_result = np.vstack((input1, input2)) - self.assertTrue(np.allclose(tr0_out, need_result)) - self.assertTrue(np.allclose(tr1_out, need_result)) + np.testing.assert_allclose(tr0_out[0], need_result, rtol=1e-05) + np.testing.assert_allclose(tr1_out[0], need_result, rtol=1e-05) elif col_type == "broadcast": need_result = input2 - self.assertTrue(np.allclose(tr0_out, need_result)) - self.assertTrue(np.allclose(tr1_out, need_result)) + np.testing.assert_allclose(tr0_out[0], need_result, rtol=1e-05) + np.testing.assert_allclose(tr1_out[0], need_result, rtol=1e-05) elif col_type == "reduce": need_result = input1 + input2 - self.assertTrue(np.allclose(tr1_out, need_result)) + np.testing.assert_allclose(tr1_out[0], need_result, rtol=1e-05) elif col_type == "scatter": need_result = input2 need_result1 = need_result[0:need_result.shape[0] // 2] need_result2 = need_result[need_result.shape[0] // 2:] - self.assertTrue(np.allclose(tr0_out, need_result1)) - self.assertTrue(np.allclose(tr1_out, need_result2)) + np.testing.assert_allclose(tr0_out[0], need_result1, rtol=1e-05) + np.testing.assert_allclose(tr1_out[0], need_result2, rtol=1e-05) elif col_type == "allreduce": need_result = input1 + input2 - self.assertTrue( - np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + need_result, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "reduce_scatter": tmp = input1 + input2 need_result1 = tmp[0:tmp.shape[0] // 2] need_result2 = tmp[tmp.shape[0] // 2:] - self.assertTrue( - np.allclose(tr0_out, need_result1, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result2, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + need_result1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + need_result2, + rtol=1e-05, + atol=1e-05) elif col_type == "sendrecv": need_result = input1 - self.assertTrue( - np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr1_out[0], + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "identity": need_result1 = input1 need_result2 = input2 - self.assertTrue(np.allclose(tr0_out, need_result1, rtol=0, atol=0)) - self.assertTrue(np.allclose(tr1_out, need_result2, rtol=0, atol=0)) + np.testing.assert_allclose(tr0_out[0], need_result1, rtol=0, atol=0) + np.testing.assert_allclose(tr1_out[0], need_result2, rtol=0, atol=0) elif col_type == "reduce_slicegather": slicesize = input1.shape[0] // 2 tmp10 = input1[0:slicesize] @@ 
-287,29 +297,39 @@ class TestDistBase(unittest.TestCase): tmp20 = input1[slicesize:] tmp21 = input2[slicesize:] need_result2 = np.concatenate((tmp20, tmp21), axis=1) - self.assertTrue(np.allclose(tr0_out, need_result1)) - self.assertTrue(np.allclose(tr1_out, need_result2)) + np.testing.assert_allclose(tr0_out, need_result1, rtol=1e-05) + np.testing.assert_allclose(tr1_out, need_result2, rtol=1e-05) elif col_type == "concat": need_result = np.concatenate((input1, input2), axis=1) - self.assertTrue( - np.allclose(tr0_out, need_result, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + need_result, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + need_result, + rtol=1e-05, + atol=1e-05) elif col_type == "split": need_result1 = np.split(input1, 2, axis=1)[0] need_result2 = np.split(input2, 2, axis=1)[1] - self.assertTrue( - np.allclose(tr0_out, need_result1, rtol=1e-05, atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out, need_result2, rtol=1e-05, atol=1e-05)) + np.testing.assert_allclose(tr0_out[0], + need_result1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0], + need_result2, + rtol=1e-05, + atol=1e-05) elif col_type == "sendrecv_array": need_result1 = np.array([[0, 1, 2]]) need_result2 = np.array([[3, 4, 5]]) - self.assertTrue( - np.allclose(tr1_out[0][0], need_result1, rtol=1e-05, - atol=1e-05)) - self.assertTrue( - np.allclose(tr1_out[0][1], need_result2, rtol=1e-05, - atol=1e-05)) + np.testing.assert_allclose(tr1_out[0][0], + need_result1, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(tr1_out[0][1], + need_result2, + rtol=1e-05, + atol=1e-05) else: pass diff --git a/python/paddle/fluid/tests/unittests/test_complex_abs.py b/python/paddle/fluid/tests/unittests/test_complex_abs.py index 6c90e09d7ca..fc477a0521f 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_abs.py +++ b/python/paddle/fluid/tests/unittests/test_complex_abs.py @@ -105,7 +105,7 @@ class TestAbs(unittest.TestCase): for place in self._places: with dg.guard(place): y = paddle.abs(paddle.to_tensor(x)) - self.assertTrue(np.allclose(np.abs(x), y.numpy())) + np.testing.assert_allclose(np.abs(x), y.numpy(), rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_cast.py b/python/paddle/fluid/tests/unittests/test_complex_cast.py index 21db0a78e72..3131499b4f5 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_cast.py +++ b/python/paddle/fluid/tests/unittests/test_complex_cast.py @@ -35,15 +35,21 @@ class TestComplexCastOp(unittest.TestCase): self.assertEqual(c_t.cast('float64').dtype, paddle.float64) self.assertEqual(c_t.cast('bool').dtype, paddle.bool) - self.assertTrue( - np.allclose(c_t.cast('int64').numpy(), r.astype('int64'))) - self.assertTrue( - np.allclose(c_t.cast('int32').numpy(), r.astype('int32'))) - self.assertTrue( - np.allclose(c_t.cast('float32').numpy(), r.astype('float32'))) - self.assertTrue( - np.allclose(c_t.cast('float64').numpy(), r.astype('float64'))) - self.assertTrue(np.allclose(c_t.cast('bool').numpy(), r.astype('bool'))) + np.testing.assert_allclose(c_t.cast('int64').numpy(), + r.astype('int64'), + rtol=1e-05) + np.testing.assert_allclose(c_t.cast('int32').numpy(), + r.astype('int32'), + rtol=1e-05) + np.testing.assert_allclose(c_t.cast('float32').numpy(), + r.astype('float32'), + rtol=1e-05) + np.testing.assert_allclose(c_t.cast('float64').numpy(), + 
r.astype('float64'), + rtol=1e-05) + np.testing.assert_allclose(c_t.cast('bool').numpy(), + r.astype('bool'), + rtol=1e-05) def test_real_to_complex(self): r = np.random.random(size=[10, 10]) * 10 @@ -52,8 +58,12 @@ class TestComplexCastOp(unittest.TestCase): self.assertEqual(r_t.cast('complex64').dtype, paddle.complex64) self.assertEqual(r_t.cast('complex128').dtype, paddle.complex128) - self.assertTrue(np.allclose(r_t.cast('complex64').real().numpy(), r)) - self.assertTrue(np.allclose(r_t.cast('complex128').real().numpy(), r)) + np.testing.assert_allclose(r_t.cast('complex64').real().numpy(), + r, + rtol=1e-05) + np.testing.assert_allclose(r_t.cast('complex128').real().numpy(), + r, + rtol=1e-05) def test_complex64_complex128(self): r = np.random.random(size=[10, 10]) @@ -65,10 +75,12 @@ class TestComplexCastOp(unittest.TestCase): self.assertTrue(c_64.cast('complex128').dtype, paddle.complex128) self.assertTrue(c_128.cast('complex128').dtype, paddle.complex64) - self.assertTrue( - np.allclose(c_64.cast('complex128').numpy(), c_128.numpy())) - self.assertTrue( - np.allclose(c_128.cast('complex128').numpy(), c_64.numpy())) + np.testing.assert_allclose(c_64.cast('complex128').numpy(), + c_128.numpy(), + rtol=1e-05) + np.testing.assert_allclose(c_128.cast('complex128').numpy(), + c_64.numpy(), + rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py index c110339bf58..cd66246bfd5 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py +++ b/python/paddle/fluid/tests/unittests/test_complex_elementwise_layers.py @@ -44,9 +44,12 @@ class TestComplexElementwiseLayers(unittest.TestCase): return paddle_apis[op](x_t, y_t).numpy() def assert_check(self, pd_result, np_result, place): - self.assertTrue( - np.allclose(pd_result, np_result), - "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n". + np.testing.assert_allclose( + pd_result, + np_result, + rtol=1e-05, + err_msg= + '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n'. 
format(place, pd_result[~np.isclose(pd_result, np_result)], np_result[~np.isclose(pd_result, np_result)])) diff --git a/python/paddle/fluid/tests/unittests/test_complex_kron.py b/python/paddle/fluid/tests/unittests/test_complex_kron.py index 4f15256a8c5..5d689fdc937 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_kron.py +++ b/python/paddle/fluid/tests/unittests/test_complex_kron.py @@ -43,7 +43,9 @@ class ComplexKronTestCase(unittest.TestCase): x_var = dg.to_variable(self.x) y_var = dg.to_variable(self.y) out_var = paddle.kron(x_var, y_var) - self.assertTrue(np.allclose(out_var.numpy(), self.ref_result)) + np.testing.assert_allclose(out_var.numpy(), + self.ref_result, + rtol=1e-05) def test_eager(self, place): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_matmul.py b/python/paddle/fluid/tests/unittests/test_complex_matmul.py index 9be7933e926..55cf8516078 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_matmul.py +++ b/python/paddle/fluid/tests/unittests/test_complex_matmul.py @@ -35,9 +35,12 @@ class TestComplexMatMulLayer(unittest.TestCase): y_var = dg.to_variable(y) result = paddle.matmul(x_var, y_var) pd_result = result.numpy() - self.assertTrue( - np.allclose(pd_result, np_result), - "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n" + np.testing.assert_allclose( + pd_result, + np_result, + rtol=1e-05, + err_msg= + '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n' .format(place, pd_result[~np.isclose(pd_result, np_result)], np_result[~np.isclose(pd_result, np_result)])) @@ -48,9 +51,12 @@ class TestComplexMatMulLayer(unittest.TestCase): y_var = dg.to_variable(y) result = x_var.matmul(y_var) pd_result = result.numpy() - self.assertTrue( - np.allclose(pd_result, np_result), - "\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n" + np.testing.assert_allclose( + pd_result, + np_result, + rtol=1e-05, + err_msg= + '\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n' .format(place, pd_result[~np.isclose(pd_result, np_result)], np_result[~np.isclose(pd_result, np_result)])) diff --git a/python/paddle/fluid/tests/unittests/test_complex_op.py b/python/paddle/fluid/tests/unittests/test_complex_op.py index 49ad644b0ab..4ce5871a2b5 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_op.py +++ b/python/paddle/fluid/tests/unittests/test_complex_op.py @@ -142,7 +142,7 @@ class TestComplexAPI(unittest.TestCase): x = paddle.to_tensor(self.x) y = paddle.to_tensor(self.y) out_np = paddle.complex(x, y).numpy() - self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_static(self): mp, sp = static.Program(), static.Program() @@ -159,7 +159,7 @@ class TestComplexAPI(unittest.TestCase): "y": self.y }, fetch_list=[out]) - self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_reshape.py b/python/paddle/fluid/tests/unittests/test_complex_reshape.py index c80970b33a7..29bdb5d7c89 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_reshape.py +++ b/python/paddle/fluid/tests/unittests/test_complex_reshape.py @@ -39,7 +39,9 @@ class TestComplexReshape(unittest.TestCase): x_var = dg.to_variable(x_np) y_var = paddle.reshape(x_var, shape) y_np = y_var.numpy() - self.assertTrue(np.allclose(np.reshape(x_np, shape), y_np)) + 
np.testing.assert_allclose(np.reshape(x_np, shape), + y_np, + rtol=1e-05) def test_shape_omit_dims(self): for dtype in self._dtypes: @@ -53,7 +55,9 @@ class TestComplexReshape(unittest.TestCase): x_var = dg.to_variable(x_np) y_var = paddle.reshape(x_var, shape) y_np = y_var.numpy() - self.assertTrue(np.allclose(np.reshape(x_np, shape_), y_np)) + np.testing.assert_allclose(np.reshape(x_np, shape_), + y_np, + rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py index 3c43dbd4582..8dc3b433cd5 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py +++ b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py @@ -40,7 +40,7 @@ class TestComplexSumLayer(unittest.TestCase): var_x = dg.to_variable(input) result = tensor.sum(var_x, axis=[1, 2]).numpy() target = np.sum(input, axis=(1, 2)) - self.assertTrue(np.allclose(result, target)) + np.testing.assert_allclose(result, target, rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py index 1618d20da2e..258164733d9 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py +++ b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py @@ -41,7 +41,7 @@ class TestComplexTraceLayer(unittest.TestCase): result = tensor.trace(var_x, offset=1, axis1=0, axis2=2).numpy() target = np.trace(input, offset=1, axis1=0, axis2=2) - self.assertTrue(np.allclose(result, target)) + np.testing.assert_allclose(result, target, rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_transpose.py b/python/paddle/fluid/tests/unittests/test_complex_transpose.py index bcbeabf8714..cfb8040674f 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_complex_transpose.py @@ -39,7 +39,7 @@ class TestComplexTransposeLayer(unittest.TestCase): with dg.guard(place): var = dg.to_variable(data) trans = paddle.transpose(var, perm=perm) - self.assertTrue(np.allclose(trans.numpy(), np_trans)) + np.testing.assert_allclose(trans.numpy(), np_trans, rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_complex_variable.py b/python/paddle/fluid/tests/unittests/test_complex_variable.py index c9ebf27cc4d..c7173a7b814 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_variable.py +++ b/python/paddle/fluid/tests/unittests/test_complex_variable.py @@ -35,7 +35,7 @@ class TestComplexVariable(unittest.TestCase): out = paddle.fluid.layers.elementwise_add(x, y) self.assertIsNotNone("{}".format(out)) - self.assertTrue(np.allclose(out.numpy(), a + b)) + np.testing.assert_allclose(out.numpy(), a + b, rtol=1e-05) self.assertEqual(out.dtype, convert_np_dtype_to_dtype_(self._dtype)) self.assertEqual(out.shape, x.shape) diff --git a/python/paddle/fluid/tests/unittests/test_complex_view_op.py b/python/paddle/fluid/tests/unittests/test_complex_view_op.py index 379eb99901b..baff6984522 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_view_op.py +++ b/python/paddle/fluid/tests/unittests/test_complex_view_op.py @@ -92,7 +92,7 @@ class TestViewAsComplexAPI(unittest.TestCase): with dygraph.guard(): x = paddle.to_tensor(self.x) out_np = paddle.as_complex(x).numpy() - 
self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_static(self): mp, sp = static.Program(), static.Program() @@ -103,7 +103,7 @@ class TestViewAsComplexAPI(unittest.TestCase): exe = static.Executor() exe.run(sp) [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out]) - self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_eager(self): with _test_eager_guard(): @@ -120,7 +120,7 @@ class TestViewAsRealAPI(unittest.TestCase): with dygraph.guard(): x = paddle.to_tensor(self.x) out_np = paddle.as_real(x).numpy() - self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_static(self): mp, sp = static.Program(), static.Program() @@ -131,7 +131,7 @@ class TestViewAsRealAPI(unittest.TestCase): exe = static.Executor() exe.run(sp) [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out]) - self.assertTrue(np.allclose(self.out, out_np)) + np.testing.assert_allclose(self.out, out_np, rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index 61043cab36a..3a74f18ff62 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -438,9 +438,10 @@ class TestCondBackward(unittest.TestCase): numerical_grad[0][j] = (loss_delta[0] - loss_value[0]) / delta feed_img_delta[0][j] = feed_img[0][j] - self.assertTrue( - np.isclose(img_grad, numerical_grad, atol=0.05, - rtol=0.05).all()) + np.testing.assert_allclose(img_grad, + numerical_grad, + rtol=0.05, + atol=0.05) def add_optimizer_helper(self, cond_func, use_cuda, use_parallel_exe): """ diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py index b8c6f1dfa2f..8782ee84a26 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py @@ -197,8 +197,8 @@ class Conv2DTestCase(unittest.TestCase): res_eager, g2 = self.paddle_nn_layer() np.testing.assert_array_almost_equal(result1, result2) np.testing.assert_array_almost_equal(result2, result3) - self.assertTrue(np.allclose(result3, res_eager)) - self.assertTrue(np.allclose(g1, g2)) + np.testing.assert_allclose(result3, res_eager, rtol=1e-05) + np.testing.assert_allclose(g1, g2, rtol=1e-05) def runTest(self): place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py index 42c23eb64fd..5672fe0dd88 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py @@ -165,8 +165,8 @@ class Conv3DTestCase(unittest.TestCase): res_eager, g2 = self.paddle_nn_layer() np.testing.assert_array_almost_equal(result1, result2) np.testing.assert_array_almost_equal(result2, result3) - self.assertTrue(np.allclose(result3, res_eager)) - self.assertTrue(np.allclose(g1, g2)) + np.testing.assert_allclose(result3, res_eager, rtol=1e-05) + np.testing.assert_allclose(g1, g2, rtol=1e-05) def runTest(self): place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_corr.py b/python/paddle/fluid/tests/unittests/test_corr.py index 6a9d931e22d..7c5127c5ba4 100644 --- a/python/paddle/fluid/tests/unittests/test_corr.py +++ 
b/python/paddle/fluid/tests/unittests/test_corr.py @@ -52,10 +52,14 @@ class Corr_Test(unittest.TestCase): corr = paddle.linalg.corrcoef(tensor) np_corr = numpy_corr(np_arr, rowvar=True, dtype=dtype) if dtype == 'float32': - self.assertTrue( - np.allclose(np_corr, corr.numpy(), atol=1.e-5)) + np.testing.assert_allclose(np_corr, + corr.numpy(), + rtol=1e-05, + atol=1e-05) else: - self.assertTrue(np.allclose(np_corr, corr.numpy())) + np.testing.assert_allclose(np_corr, + corr.numpy(), + rtol=1e-05) def test_tensor_corr_rowvar(self): typelist = ['float64', 'float32'] @@ -75,10 +79,14 @@ class Corr_Test(unittest.TestCase): corr = paddle.linalg.corrcoef(tensor, rowvar=False) np_corr = numpy_corr(np_arr, rowvar=False, dtype=dtype) if dtype == 'float32': - self.assertTrue( - np.allclose(np_corr, corr.numpy(), atol=1.e-5)) + np.testing.assert_allclose(np_corr, + corr.numpy(), + rtol=1e-05, + atol=1e-05) else: - self.assertTrue(np.allclose(np_corr, corr.numpy())) + np.testing.assert_allclose(np_corr, + corr.numpy(), + rtol=1e-05) # Input(x) only support N-D (1<=N<=2) tensor diff --git a/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py b/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py index f95089a4cdb..67c22dd0b71 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_embedding_loss.py @@ -64,7 +64,7 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): self.label_np, margin=0.5, reduction='mean') - self.assertTrue(np.allclose(dy_result.numpy(), expected1)) + np.testing.assert_allclose(dy_result.numpy(), expected1, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.cosine_embedding_loss(input1, @@ -78,7 +78,7 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): margin=0.5, reduction='sum') - self.assertTrue(np.allclose(dy_result.numpy(), expected2)) + np.testing.assert_allclose(dy_result.numpy(), expected2, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.cosine_embedding_loss(input1, @@ -92,7 +92,7 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): margin=0.5, reduction='none') - self.assertTrue(np.allclose(dy_result.numpy(), expected3)) + np.testing.assert_allclose(dy_result.numpy(), expected3, rtol=1e-05) self.assertTrue(dy_result.shape, [5]) def run_static(self, use_gpu=False): @@ -130,21 +130,21 @@ class TestFunctionCosineEmbeddingLoss(unittest.TestCase): margin=0.5, reduction='none') - self.assertTrue(np.allclose(static_result[0], expected)) + np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) expected = cosine_embedding_loss(self.input1_np, self.input2_np, self.label_np, margin=0.5, reduction='sum') - self.assertTrue(np.allclose(static_result[1], expected)) + np.testing.assert_allclose(static_result[1], expected, rtol=1e-05) expected = cosine_embedding_loss(self.input1_np, self.input2_np, self.label_np, margin=0.5, reduction='mean') - self.assertTrue(np.allclose(static_result[2], expected)) + np.testing.assert_allclose(static_result[2], expected, rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) @@ -262,7 +262,7 @@ class TestClassCosineEmbeddingLoss(unittest.TestCase): self.label_np, margin=0.5, reduction='mean') - self.assertTrue(np.allclose(dy_result.numpy(), expected1)) + np.testing.assert_allclose(dy_result.numpy(), expected1, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) input1_1D = paddle.to_tensor(self.input1_np_1D) @@ -274,7 +274,7 @@ 
class TestClassCosineEmbeddingLoss(unittest.TestCase): self.label_np_1D, margin=0.5, reduction='mean') - self.assertTrue(np.allclose(dy_result.numpy(), expected2)) + np.testing.assert_allclose(dy_result.numpy(), expected2, rtol=1e-05) def run_static(self): input1 = static.data(name='input1', shape=[10, 3], dtype='float32') @@ -299,7 +299,7 @@ class TestClassCosineEmbeddingLoss(unittest.TestCase): margin=0.5, reduction='mean') - self.assertTrue(np.allclose(static_result[0], expected)) + np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py index 45000c3aef8..3b7ab32d14c 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py @@ -61,7 +61,7 @@ class TestCosineSimilarityAPI(unittest.TestCase): fetch_list=[result]) np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps) - self.assertTrue(np.allclose(fetches[0], np_out)) + np.testing.assert_allclose(fetches[0], np_out, rtol=1e-05) def test_static(self): for place in self.places: @@ -82,7 +82,7 @@ class TestCosineSimilarityAPI(unittest.TestCase): tesnor_x2 = paddle.to_tensor(np_x2) y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps) - self.assertTrue(np.allclose(y.numpy(), np_out)) + np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05) def test_dygraph_2(self): paddle.disable_static() @@ -99,7 +99,7 @@ class TestCosineSimilarityAPI(unittest.TestCase): tesnor_x2 = paddle.to_tensor(np_x2) y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps) - self.assertTrue(np.allclose(y.numpy(), np_out)) + np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05) def test_dygraph_3(self): paddle.disable_static() @@ -117,7 +117,7 @@ class TestCosineSimilarityAPI(unittest.TestCase): tesnor_x2 = paddle.to_tensor(np_x2) y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps) - self.assertTrue(np.allclose(y.numpy(), np_out)) + np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05) def test_dygraph_4(self): paddle.disable_static() @@ -136,7 +136,7 @@ class TestCosineSimilarityAPI(unittest.TestCase): tesnor_x2 = paddle.to_tensor(np_x2) y = cos_sim_func(tesnor_x1, tesnor_x2) - self.assertTrue(np.allclose(y.numpy(), np_out)) + np.testing.assert_allclose(y.numpy(), np_out, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py index 8f1d4fe8500..da48f340d90 100644 --- a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py @@ -48,7 +48,7 @@ class TestCountNonzeroAPI(unittest.TestCase): fetch_list=[out1, out2, out3, out4, out5]) out_ref = np.count_nonzero(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref), True) + np.testing.assert_allclose(out, out_ref, rtol=1e-05) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -62,7 +62,7 @@ class TestCountNonzeroAPI(unittest.TestCase): axis = None out_ref = np.count_nonzero(x, axis, keepdims=keepdim) - self.assertEqual(np.allclose(out.numpy(), out_ref), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05) test_case(self.x) test_case(self.x, None) diff --git a/python/paddle/fluid/tests/unittests/test_cov.py 
b/python/paddle/fluid/tests/unittests/test_cov.py index c67b2c2d357..359911cc326 100644 --- a/python/paddle/fluid/tests/unittests/test_cov.py +++ b/python/paddle/fluid/tests/unittests/test_cov.py @@ -59,7 +59,7 @@ class Cov_Test(unittest.TestCase): ddof=1, fweights=None, aweights=None) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_default(self): with _test_eager_guard(): @@ -91,7 +91,7 @@ class Cov_Test(unittest.TestCase): ddof=1, fweights=None, aweights=None) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_rowvar(self): with _test_eager_guard(): @@ -123,7 +123,7 @@ class Cov_Test(unittest.TestCase): ddof=0, fweights=None, aweights=None) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_ddof(self): with _test_eager_guard(): @@ -158,7 +158,7 @@ class Cov_Test(unittest.TestCase): ddof=1, fweights=np_fw, aweights=None) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_fweights(self): with _test_eager_guard(): @@ -193,7 +193,7 @@ class Cov_Test(unittest.TestCase): ddof=1, fweights=None, aweights=np_aw) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_aweights(self): with _test_eager_guard(): @@ -230,7 +230,7 @@ class Cov_Test(unittest.TestCase): ddof=1, fweights=np_fw, aweights=np_aw) - self.assertTrue(np.allclose(np_cov, cov.numpy())) + np.testing.assert_allclose(np_cov, cov.numpy(), rtol=1e-05) def test_tensor_cov_weights(self): with _test_eager_guard(): @@ -259,10 +259,10 @@ class Cov_Test3(unittest.TestCase): def test_err(): np_arr = np.random.rand(*self.shape).astype('float64') - np_fw = self.fw_s * np.random.rand(*self.fweightshape).astype( - 'int32') - np_aw = self.aw_s * np.random.rand(*self.aweightshape).astype( - 'float64') + np_fw = self.fw_s * np.random.rand( + *self.fweightshape).astype('int32') + np_aw = self.aw_s * np.random.rand( + *self.aweightshape).astype('float64') tensor = paddle.to_tensor(np_arr) fweights = paddle.to_tensor(np_fw) aweights = paddle.to_tensor(np_aw) diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py index 4982ae59d43..624f10d1e66 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py @@ -240,8 +240,10 @@ class CrossEntropyLoss(unittest.TestCase): if self.weight is not None else None, reduction=self.reduction) - self.assertTrue(np.allclose(paddle_loss_swce.numpy(), expected)) - self.assertTrue(np.allclose(paddle_loss_ce.numpy(), expected)) + np.testing.assert_allclose(paddle_loss_swce.numpy(), + expected, + rtol=1e-05) + np.testing.assert_allclose(paddle_loss_ce.numpy(), expected, rtol=1e-05) ###soft_label test start ###soft_label test 1 @@ -316,8 +318,8 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test 2 def test_cross_entropy_loss_soft_1d_weight(self): @@ 
-402,8 +404,8 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test 3 def test_cross_entropy_loss_soft_1d_mean(self): @@ -477,8 +479,8 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test 4 def test_cross_entropy_loss_soft_1d_weight_mean(self): @@ -553,8 +555,8 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test 5 def test_cross_entropy_loss_soft_2d(self): @@ -632,9 +634,9 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test 6 def test_cross_entropy_loss_soft_2d_weight_mean(self): @@ -713,9 +715,9 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(static_ret) paddle.disable_static() - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) ###soft_label test end @@ -751,9 +753,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=0)[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_mean_ignore_negative(self): N = 100 @@ -789,9 +791,9 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=-1)[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def 
test_cross_entropy_loss_1d_with_weight_mean_ignore(self): N = 100 @@ -837,9 +839,9 @@ class CrossEntropyLoss(unittest.TestCase): weight=weight_np, ignore_index=0)[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_mean_ignore_exceedlabel(self): N = 100 @@ -861,7 +863,7 @@ class CrossEntropyLoss(unittest.TestCase): weight=weight_np, ignore_index=255)[0] - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_mean(self): input_np = np.random.random([2, 4]).astype(self.dtype) @@ -901,9 +903,9 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, weight=weight_np)[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_sum(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -942,9 +944,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='sum')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_none(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -987,9 +989,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='none') - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_with_weight_none_func(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -1032,9 +1034,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='none') - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_mean(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -1064,9 +1066,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np)[0] 
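[Reviewer note] The static_ret -> static_ret[0] edits in this file fix a latent shape mismatch rather than just swapping APIs: exe.run(..., fetch_list=[loss]) returns a list of fetched arrays, which np.allclose silently broadcasts against the expected value, while np.testing.assert_allclose rejects mismatched shapes. A minimal sketch of the difference in plain NumPy, with hypothetical values and independent of Paddle:

    import numpy as np

    static_ret = [np.array([0.5])]  # what exe.run(..., fetch_list=[loss]) yields; coerces to shape (1, 1)
    expected = np.array([0.5])      # shape (1,)

    np.allclose(static_ret, expected)                  # True: (1, 1) broadcasts against (1,)
    # np.testing.assert_allclose(static_ret, expected) # raises: shapes (1, 1) and (1,) mismatch
    np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)  # passes once shapes agree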
- self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_sum(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -1098,9 +1100,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, reduction='sum')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_1d_none(self): input_np = np.random.random([100, 200]).astype(self.dtype) #N,C @@ -1134,9 +1136,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_1d(input_np, label_np, reduction='none') - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_none(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1181,9 +1183,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='none') - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_axis_change_mean(self): input_np = np.random.random(size=(2, 3, 2, 2)).astype(self.dtype) #NCHW @@ -1230,9 +1232,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='mean')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_mean_ignore_exceedlabel(self): N = 4 @@ -1254,7 +1256,7 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, ignore_index=255)[0] - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_mean(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1296,9 +1298,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='mean')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - 
self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_with_weight_sum(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1341,9 +1343,9 @@ class CrossEntropyLoss(unittest.TestCase): label_np, weight=weight_np, reduction='sum')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_none(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1380,9 +1382,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = np.squeeze(dy_ret_value) self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_2d(input_np, label_np, reduction='none') - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_mean(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1419,9 +1421,9 @@ class CrossEntropyLoss(unittest.TestCase): self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_2d(input_np, label_np, reduction='mean')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_cross_entropy_loss_2d_sum(self): input_np = np.random.random(size=(2, 2, 2, 3)).astype(self.dtype) #NHWC @@ -1457,9 +1459,9 @@ class CrossEntropyLoss(unittest.TestCase): dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) expected = cross_entropy_loss_2d(input_np, label_np, reduction='sum')[0] - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, expected)) - self.assertTrue(np.allclose(dy_ret_value, expected)) + np.testing.assert_allclose(static_ret[0], dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05) + np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05) def test_soft_1d_dygraph_final_state_api(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_cross_op.py b/python/paddle/fluid/tests/unittests/test_cross_op.py index b54883975a6..8d0a3465706 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_op.py @@ -93,7 +93,7 @@ class TestCrossAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), 
rtol=1e-05) # case 2: with program_guard(Program(), Program()): @@ -109,7 +109,7 @@ class TestCrossAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], [-1.0, -1.0, -1.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 3: with program_guard(Program(), Program()): @@ -129,7 +129,7 @@ class TestCrossAPI(unittest.TestCase): # np_z = z.numpy() # expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], # [-1.0, -1.0, -1.0]]) - # self.assertTrue(np.allclose(expect_out, np_z)) + # np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: with fluid.dygraph.guard(): @@ -139,7 +139,7 @@ class TestCrossAPI(unittest.TestCase): np_z = z.numpy() expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py b/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py index ef886d2067a..ad854bebd01 100644 --- a/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_cuda_random_seed.py @@ -78,7 +78,7 @@ class TestGeneratorSeed(unittest.TestCase): if core.is_compiled_with_cuda(): print(">>>>>>> dropout dygraph >>>>>>>") - self.assertTrue(np.allclose(y_np, y1_np)) + np.testing.assert_allclose(y_np, y1_np, rtol=1e-05) def test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" @@ -97,8 +97,8 @@ class TestGeneratorSeed(unittest.TestCase): if core.is_compiled_with_cuda(): print(">>>>>>> gaussian random dygraph >>>>>>>") - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x2_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x2_np, x3_np, rtol=1e-05) def test_generator_randint_dygraph(self): """Test Generator seed.""" @@ -120,7 +120,7 @@ class TestGeneratorSeed(unittest.TestCase): if core.is_compiled_with_cuda(): print(">>>>>>> randint dygraph >>>>>>>") - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_gen_TruncatedNormal_initializer(self): fluid.disable_dygraph() @@ -165,8 +165,8 @@ class TestGeneratorSeed(unittest.TestCase): if core.is_compiled_with_cuda(): print(">>>>>>> truncated normal static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) diff --git a/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py b/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py index 3b7093db391..69e9bd3bb69 100644 --- a/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py +++ b/python/paddle/fluid/tests/unittests/test_cudnn_grucell.py @@ -147,9 +147,11 @@ class TestCudnnGRU(unittest.TestCase): np_out = cudnn_step(step_input_np, pre_hidden_np, weight_ih, bias_ih, weight_hh, bias_hh) - self.assertTrue(np.allclose(api_out.numpy(), np_out, rtol=1e-5, atol=0)) - self.assertTrue( - np.allclose(named_api_out.numpy(), np_out, rtol=1e-5, atol=0)) + np.testing.assert_allclose(api_out.numpy(), np_out, rtol=1e-05, atol=0) + np.testing.assert_allclose(named_api_out.numpy(), + np_out, + rtol=1e-05, + atol=0) class 
TestNonCudnnGRU(unittest.TestCase): @@ -226,9 +228,11 @@ class TestNonCudnnGRU(unittest.TestCase): np_out = non_cudnn_step(step_input_np, pre_hidden_np, gate_w, gate_b, candidate_w, candidate_b) - self.assertTrue(np.allclose(api_out.numpy(), np_out, rtol=1e-5, atol=0)) - self.assertTrue( - np.allclose(named_api_out.numpy(), np_out, rtol=1e-5, atol=0)) + np.testing.assert_allclose(api_out.numpy(), np_out, rtol=1e-05, atol=0) + np.testing.assert_allclose(named_api_out.numpy(), + np_out, + rtol=1e-05, + atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py b/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py index 36b563a97c7..f7ba33bd5e2 100644 --- a/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py +++ b/python/paddle/fluid/tests/unittests/test_cudnn_lstmcell.py @@ -157,26 +157,22 @@ class TestCudnnLSTM(unittest.TestCase): pre_hidden_np, pre_cell_np, weight_ih, bias_ih, weight_hh, bias_hh) - self.assertTrue( - np.allclose(api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(api_cell_out.numpy(), - np_cell_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(named_api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(named_api_cell_out.numpy(), - np_cell_out, - rtol=1e-5, - atol=0)) + np.testing.assert_allclose(api_hidden_out.numpy(), + np_hidden_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(api_cell_out.numpy(), + np_cell_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(named_api_hidden_out.numpy(), + np_hidden_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(named_api_cell_out.numpy(), + np_cell_out, + rtol=1e-05, + atol=0) class TestNonCudnnLSTM(unittest.TestCase): @@ -250,26 +246,22 @@ class TestNonCudnnLSTM(unittest.TestCase): pre_cell_np, gate_w, gate_b) - self.assertTrue( - np.allclose(api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(api_cell_out.numpy(), - np_cell_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(named_api_hidden_out.numpy(), - np_hidden_out, - rtol=1e-5, - atol=0)) - self.assertTrue( - np.allclose(named_api_cell_out.numpy(), - np_cell_out, - rtol=1e-5, - atol=0)) + np.testing.assert_allclose(api_hidden_out.numpy(), + np_hidden_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(api_cell_out.numpy(), + np_cell_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(named_api_hidden_out.numpy(), + np_hidden_out, + rtol=1e-05, + atol=0) + np.testing.assert_allclose(named_api_cell_out.numpy(), + np_cell_out, + rtol=1e-05, + atol=0) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cumprod_op.py b/python/paddle/fluid/tests/unittests/test_cumprod_op.py index 66b4a601973..94eb2d36078 100644 --- a/python/paddle/fluid/tests/unittests/test_cumprod_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumprod_op.py @@ -179,7 +179,7 @@ class TestCumprodAPI(unittest.TestCase): out_ref = np.cumprod(self.x, -2) for r in res: - self.assertEqual(np.allclose(out_ref, r), True) + np.testing.assert_allclose(out_ref, r, rtol=1e-05) for place in self.place: run(place) @@ -192,7 +192,7 @@ class TestCumprodAPI(unittest.TestCase): x = paddle.to_tensor(self.x) out = paddle.cumprod(x, 1) out_ref = np.cumprod(self.x, 1) - self.assertEqual(np.allclose(out_ref, out.numpy()), True) + np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: diff --git 
a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 1989a8c1448..dfcd34cebfe 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -72,15 +72,15 @@ class TestCumsumOp(unittest.TestCase): ]) z = np.cumsum(data_np) - self.assertTrue(np.allclose(z, out[0])) + np.testing.assert_allclose(z, out[0], rtol=1e-05) z = np.cumsum(data_np, axis=0) - self.assertTrue(np.allclose(z, out[1])) + np.testing.assert_allclose(z, out[1], rtol=1e-05) z = np.cumsum(data_np, axis=-1) - self.assertTrue(np.allclose(z, out[2])) + np.testing.assert_allclose(z, out[2], rtol=1e-05) self.assertTrue(out[3].dtype == np.float64) self.assertTrue(out[4].dtype == np.int32) z = np.cumsum(data_np, axis=-2) - self.assertTrue(np.allclose(z, out[5])) + np.testing.assert_allclose(z, out[5], rtol=1e-05) def test_cpu(self): paddle.disable_static(paddle.fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_custom_grad_input.py b/python/paddle/fluid/tests/unittests/test_custom_grad_input.py index 2d12243de52..4a1e3085368 100644 --- a/python/paddle/fluid/tests/unittests/test_custom_grad_input.py +++ b/python/paddle/fluid/tests/unittests/test_custom_grad_input.py @@ -48,7 +48,9 @@ class TestTensorBackward(unittest.TestCase): x_grad = np.matmul(grad, y.T) - self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy())) + np.testing.assert_allclose(x_grad, + x_tensor.grad.numpy(), + rtol=1e-05) def test_tensor_backward(self): with _test_eager_guard(): @@ -83,8 +85,9 @@ class TestBackwardAPI(unittest.TestCase): x_grad = np.matmul(grad, y.T) - self.assertTrue( - np.allclose(x_grad * 2, x_tensor.grad.numpy())) + np.testing.assert_allclose(x_grad * 2, + x_tensor.grad.numpy(), + rtol=1e-05) def test_backward_api(self): with _test_eager_guard(): @@ -108,7 +111,9 @@ class TestBackwardAPI(unittest.TestCase): x_grad = np.matmul(grad, y.T) - self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy())) + np.testing.assert_allclose(x_grad, + x_tensor.grad.numpy(), + rtol=1e-05) def test_backward_single_tensor(self): with _test_eager_guard(): @@ -131,7 +136,9 @@ class TestBackwardAPI(unittest.TestCase): x_grad = np.matmul(grad, y.T) - self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy())) + np.testing.assert_allclose(x_grad, + x_tensor.grad.numpy(), + rtol=1e-05) def test_backward_none_grad_tensor(self): with _test_eager_guard(): @@ -165,7 +172,9 @@ class TestBackwardAPI(unittest.TestCase): z = x**3 x_grad = 2 * x * (y_grad + 3 * y * y * z_grad) - self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy())) + np.testing.assert_allclose(x_grad, + x_tensor.grad.numpy(), + rtol=1e-05) def test_backward_accumulator_with_init_grad(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index 650ca5ca134..d40a15ed3e0 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -71,7 +71,11 @@ class TestDataNormOpInference(unittest.TestCase): self.use_mkldnn = False def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.testing.assert_allclose(np.array(tensor), + np_array, + rtol=1e-05, + atol=atol, + err_msg=msg) def check_with_place(self, place, diff --git a/python/paddle/fluid/tests/unittests/test_deg2rad.py 
b/python/paddle/fluid/tests/unittests/test_deg2rad.py index c3e77c0ac5d..274b6b64f46 100644 --- a/python/paddle/fluid/tests/unittests/test_deg2rad.py +++ b/python/paddle/fluid/tests/unittests/test_deg2rad.py @@ -53,7 +53,7 @@ class TestDeg2radAPI(unittest.TestCase): paddle.disable_static() x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0]) result1 = paddle.deg2rad(x1) - self.assertEqual(np.allclose(self.out_np, result1.numpy()), True) + np.testing.assert_allclose(self.out_np, result1.numpy(), rtol=1e-05) paddle.enable_static() @@ -71,6 +71,6 @@ class TestDeg2radAPI2(TestDeg2radAPI): x2 = paddle.to_tensor(180) result2 = paddle.deg2rad(x2) - self.assertEqual(np.allclose(np.pi, result2.numpy()), True) + np.testing.assert_allclose(np.pi, result2.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_determinant_op.py b/python/paddle/fluid/tests/unittests/test_determinant_op.py index 8b368485211..bad4d0c219f 100644 --- a/python/paddle/fluid/tests/unittests/test_determinant_op.py +++ b/python/paddle/fluid/tests/unittests/test_determinant_op.py @@ -85,14 +85,14 @@ class TestDeterminantAPI(unittest.TestCase): out_ref = np.linalg.det(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out, out_ref, rtol=0.001) def test_api_dygraph(self): paddle.disable_static(self.place) x_tensor = paddle.to_tensor(self.x) out = paddle.linalg.det(x_tensor) out_ref = np.linalg.det(self.x) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) paddle.enable_static() def test_eager(self): @@ -150,14 +150,14 @@ class TestSlogDeterminantAPI(unittest.TestCase): res = exe.run(feed={'X': self.x}, fetch_list=[out]) out_ref = np.array(np.linalg.slogdet(self.x)) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out, out_ref, rtol=0.001) def test_api_dygraph(self): paddle.disable_static(self.place) x_tensor = paddle.to_tensor(self.x) out = paddle.linalg.slogdet(x_tensor) out_ref = np.array(np.linalg.slogdet(self.x)) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_dgc_momentum_op.py b/python/paddle/fluid/tests/unittests/test_dgc_momentum_op.py index d827c500995..0c79d80d343 100644 --- a/python/paddle/fluid/tests/unittests/test_dgc_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_dgc_momentum_op.py @@ -95,10 +95,13 @@ class TestDGCMomentumOp1(unittest.TestCase): } def check(self, actual_t, expect_t, place, out_name, atol=1e-5): - self.assertTrue( - np.allclose(actual_t, expect_t, atol=atol), - "Output (" + out_name + ") has diff at " + str(place) + - "\nExpect " + str(expect_t) + "\n" + "But Got" + str(actual_t)) + np.testing.assert_allclose( + actual_t, + expect_t, + rtol=1e-05, + atol=atol, + err_msg='Output (' + out_name + ') has diff at ' + str(place) + + '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) def check_momentum_step(self, place): self.setup(place=place) diff --git a/python/paddle/fluid/tests/unittests/test_dgc_op.py b/python/paddle/fluid/tests/unittests/test_dgc_op.py index 0ab710b8cbb..867c918c35a 100644 --- a/python/paddle/fluid/tests/unittests/test_dgc_op.py +++ b/python/paddle/fluid/tests/unittests/test_dgc_op.py @@ -90,10 +90,13 @@ class 
TestDGCOp(unittest.TestCase): self.gather_buff_name).get_tensor() def check(self, actual_t, expect_t, place, out_name, atol=1e-5): - self.assertTrue( - np.allclose(actual_t, expect_t, atol=atol), - "Output (" + out_name + ") has diff at " + str(place) + - "\nExpect " + str(expect_t) + "\n" + "But Got" + str(actual_t)) + np.testing.assert_allclose( + actual_t, + expect_t, + rtol=1e-05, + atol=atol, + err_msg='Output (' + out_name + ') has diff at ' + str(place) + + '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) def test_run_and_check(self): self.setup(place=core.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_diag_embed.py b/python/paddle/fluid/tests/unittests/test_diag_embed.py index 546247167b8..b7a869484e5 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_embed.py +++ b/python/paddle/fluid/tests/unittests/test_diag_embed.py @@ -69,8 +69,8 @@ class TestDiagEmbedAPICase(unittest.TestCase): [np.stack([np.diag(s, 0) for s in r], 0) for r in diag_embed], 0) target2 = np.stack( [np.stack([np.diag(s, 1) for s in r], 0) for r in diag_embed], 0) - self.assertTrue(np.allclose(results[0], target1)) - self.assertTrue(np.allclose(results[1], target2)) + np.testing.assert_allclose(results[0], target1, rtol=1e-05) + np.testing.assert_allclose(results[1], target2, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_diag_v2.py b/python/paddle/fluid/tests/unittests/test_diag_v2.py index aaae8e65730..b1988849217 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_v2.py +++ b/python/paddle/fluid/tests/unittests/test_diag_v2.py @@ -150,48 +150,48 @@ class TestDiagV2API(unittest.TestCase): def run_imperative(self): x = paddle.to_tensor(self.input_np) y = paddle.diag(x) - self.assertTrue(np.allclose(y.numpy(), self.expected0)) + np.testing.assert_allclose(y.numpy(), self.expected0, rtol=1e-05) y = paddle.diag(x, offset=1) - self.assertTrue(np.allclose(y.numpy(), self.expected1)) + np.testing.assert_allclose(y.numpy(), self.expected1, rtol=1e-05) y = paddle.diag(x, offset=-1) - self.assertTrue(np.allclose(y.numpy(), self.expected2)) + np.testing.assert_allclose(y.numpy(), self.expected2, rtol=1e-05) x = paddle.to_tensor(self.input_np2) y = paddle.diag(x, padding_value=8) - self.assertTrue(np.allclose(y.numpy(), self.expected3)) + np.testing.assert_allclose(y.numpy(), self.expected3, rtol=1e-05) x = paddle.to_tensor(self.input_np3) y = paddle.diag(x, padding_value=8.0) - self.assertTrue(np.allclose(y.numpy(), self.expected4)) + np.testing.assert_allclose(y.numpy(), self.expected4, rtol=1e-05) y = paddle.diag(x, padding_value=-8) - self.assertTrue(np.allclose(y.numpy(), self.expected5)) + np.testing.assert_allclose(y.numpy(), self.expected5, rtol=1e-05) x = paddle.to_tensor(self.input_np4) y = paddle.diag(x) - self.assertTrue(np.allclose(y.numpy(), self.expected6)) + np.testing.assert_allclose(y.numpy(), self.expected6, rtol=1e-05) y = paddle.diag(x, offset=1) - self.assertTrue(np.allclose(y.numpy(), self.expected7)) + np.testing.assert_allclose(y.numpy(), self.expected7, rtol=1e-05) y = paddle.diag(x, offset=-1) - self.assertTrue(np.allclose(y.numpy(), self.expected8)) + np.testing.assert_allclose(y.numpy(), self.expected8, rtol=1e-05) x = paddle.to_tensor(self.input_np5) y = paddle.diag(x) - self.assertTrue(np.allclose(y.numpy(), self.expected9)) + np.testing.assert_allclose(y.numpy(), self.expected9, rtol=1e-05) y = paddle.diag(x, offset=1) - self.assertTrue(np.allclose(y.numpy(), self.expected10)) + 
np.testing.assert_allclose(y.numpy(), self.expected10, rtol=1e-05) y = paddle.diag(x, offset=-1) - self.assertTrue(np.allclose(y.numpy(), self.expected11)) + np.testing.assert_allclose(y.numpy(), self.expected11, rtol=1e-05) x = paddle.to_tensor(self.input_np6) y = paddle.diag(x, offset=-1) - self.assertTrue(np.allclose(y.numpy(), self.expected12)) + np.testing.assert_allclose(y.numpy(), self.expected12, rtol=1e-05) def run_static(self, use_gpu=False): x = paddle.static.data(name='input', shape=[10, 10], dtype='float32') @@ -236,20 +236,20 @@ class TestDiagV2API(unittest.TestCase): result8, result9, result10, result11, result12, result13 ]) - self.assertTrue(np.allclose(res0, self.expected0)) - self.assertTrue(np.allclose(res1, self.expected1)) - self.assertTrue(np.allclose(res2, self.expected2)) + np.testing.assert_allclose(res0, self.expected0, rtol=1e-05) + np.testing.assert_allclose(res1, self.expected1, rtol=1e-05) + np.testing.assert_allclose(res2, self.expected2, rtol=1e-05) self.assertTrue('aaa' in result3.name) - self.assertTrue(np.allclose(res4, self.expected3)) - self.assertTrue(np.allclose(res5, self.expected4)) - self.assertTrue(np.allclose(res6, self.expected5)) - self.assertTrue(np.allclose(res7, self.expected6)) - self.assertTrue(np.allclose(res8, self.expected7)) - self.assertTrue(np.allclose(res9, self.expected8)) - self.assertTrue(np.allclose(res10, self.expected9)) - self.assertTrue(np.allclose(res11, self.expected10)) - self.assertTrue(np.allclose(res12, self.expected11)) - self.assertTrue(np.allclose(res13, self.expected12)) + np.testing.assert_allclose(res4, self.expected3, rtol=1e-05) + np.testing.assert_allclose(res5, self.expected4, rtol=1e-05) + np.testing.assert_allclose(res6, self.expected5, rtol=1e-05) + np.testing.assert_allclose(res7, self.expected6, rtol=1e-05) + np.testing.assert_allclose(res8, self.expected7, rtol=1e-05) + np.testing.assert_allclose(res9, self.expected8, rtol=1e-05) + np.testing.assert_allclose(res10, self.expected9, rtol=1e-05) + np.testing.assert_allclose(res11, self.expected10, rtol=1e-05) + np.testing.assert_allclose(res12, self.expected11, rtol=1e-05) + np.testing.assert_allclose(res13, self.expected12, rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_diagflat.py b/python/paddle/fluid/tests/unittests/test_diagflat.py index 98f8c3d434f..389f22baa87 100644 --- a/python/paddle/fluid/tests/unittests/test_diagflat.py +++ b/python/paddle/fluid/tests/unittests/test_diagflat.py @@ -52,23 +52,23 @@ class TestDiagFlatAPI(unittest.TestCase): def run_imperative(self): x = paddle.to_tensor(self.input_np) y = paddle.diagflat(x) - self.assertTrue(np.allclose(y.numpy(), self.expected0)) + np.testing.assert_allclose(y.numpy(), self.expected0, rtol=1e-05) y = paddle.diagflat(x, offset=1) - self.assertTrue(np.allclose(y.numpy(), self.expected1)) + np.testing.assert_allclose(y.numpy(), self.expected1, rtol=1e-05) y = paddle.diagflat(x, offset=-1) - self.assertTrue(np.allclose(y.numpy(), self.expected2)) + np.testing.assert_allclose(y.numpy(), self.expected2, rtol=1e-05) x = paddle.to_tensor(self.input_np2) y = paddle.diagflat(x) - self.assertTrue(np.allclose(y.numpy(), self.expected3)) + np.testing.assert_allclose(y.numpy(), self.expected3, rtol=1e-05) y = paddle.diagflat(x, offset=1) - self.assertTrue(np.allclose(y.numpy(), self.expected4)) + np.testing.assert_allclose(y.numpy(), self.expected4, rtol=1e-05) y = paddle.diagflat(x, offset=-1) - 
self.assertTrue(np.allclose(y.numpy(), self.expected5)) + np.testing.assert_allclose(y.numpy(), self.expected5, rtol=1e-05) def run_static(self, use_gpu=False): x = paddle.static.data(name='input', shape=[10, 10], dtype='float64') @@ -85,8 +85,8 @@ class TestDiagFlatAPI(unittest.TestCase): }, fetch_list=[result0, result3]) - self.assertTrue(np.allclose(res0, self.expected0)) - self.assertTrue(np.allclose(res3, self.expected3)) + np.testing.assert_allclose(res0, self.expected0, rtol=1e-05) + np.testing.assert_allclose(res3, self.expected3, rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_diagonal_op.py b/python/paddle/fluid/tests/unittests/test_diagonal_op.py index b5600f21b78..65d202078d2 100644 --- a/python/paddle/fluid/tests/unittests/test_diagonal_op.py +++ b/python/paddle/fluid/tests/unittests/test_diagonal_op.py @@ -115,14 +115,14 @@ class TestDiagonalAPI(unittest.TestCase): res = exe.run(feed={'X': self.x}, fetch_list=[out]) out_ref = np.diagonal(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out, out_ref, rtol=1e-08) def test_api_dygraph(self): paddle.disable_static(self.place) x_tensor = paddle.to_tensor(self.x) out = paddle.diagonal(x_tensor) out_ref = np.diagonal(self.x) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08) paddle.enable_static() def test_api_eager(self): @@ -134,13 +134,13 @@ class TestDiagonalAPI(unittest.TestCase): out3 = paddle.diagonal(x_tensor, offset=1, axis1=0, axis2=1) out4 = paddle.diagonal(x_tensor, offset=0, axis1=1, axis2=2) out_ref = np.diagonal(self.x) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08) out2_ref = np.diagonal(self.x, offset=0, axis1=2, axis2=1) - self.assertEqual(np.allclose(out2.numpy(), out2_ref, rtol=1e-08), True) + np.testing.assert_allclose(out2.numpy(), out2_ref, rtol=1e-08) out3_ref = np.diagonal(self.x, offset=1, axis1=0, axis2=1) - self.assertEqual(np.allclose(out3.numpy(), out3_ref, rtol=1e-08), True) + np.testing.assert_allclose(out3.numpy(), out3_ref, rtol=1e-08) out4_ref = np.diagonal(self.x, offset=0, axis1=1, axis2=2) - self.assertEqual(np.allclose(out4.numpy(), out4_ref, rtol=1e-08), True) + np.testing.assert_allclose(out4.numpy(), out4_ref, rtol=1e-08) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_digamma_op.py b/python/paddle/fluid/tests/unittests/test_digamma_op.py index 27ba710a96d..c57e26bbbff 100644 --- a/python/paddle/fluid/tests/unittests/test_digamma_op.py +++ b/python/paddle/fluid/tests/unittests/test_digamma_op.py @@ -85,8 +85,7 @@ class TestDigammaAPI(unittest.TestCase): exe = static.Executor(place) out_value = exe.run(feed=input_dict, fetch_list=[out.name]) - self.assertEqual( - np.allclose(out_value[0], sc_res, rtol=1e-5), True) + np.testing.assert_allclose(out_value[0], sc_res, rtol=1e-05) def test_in_dynamic_mode(self): for dtype in self.dtypes: @@ -97,7 +96,7 @@ class TestDigammaAPI(unittest.TestCase): with fluid.dygraph.guard(place): input_t = paddle.to_tensor(input) res = paddle.digamma(input_t).numpy() - self.assertEqual(np.allclose(res, sc_res, rtol=1e-05), True) + np.testing.assert_allclose(res, sc_res, rtol=1e-05) def test_in_eager_dynamic_mode(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_dist_op.py 
b/python/paddle/fluid/tests/unittests/test_dist_op.py index 255431544f9..37835d54176 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_op.py +++ b/python/paddle/fluid/tests/unittests/test_dist_op.py @@ -183,7 +183,7 @@ class TestDistAPI(unittest.TestCase): 'y': y_i }, fetch_list=[result]) - self.assertTrue(np.allclose(dist(x_i, y_i, p), out[0])) + np.testing.assert_allclose(dist(x_i, y_i, p), out[0], rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dist_train.py b/python/paddle/fluid/tests/unittests/test_dist_train.py index bdaee766543..04e1c92d27a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_train.py @@ -20,7 +20,7 @@ import unittest from multiprocessing import Process import signal -import numpy +import numpy as np import paddle.fluid as fluid import paddle.fluid.layers as layers @@ -56,7 +56,7 @@ class TestSendOp(unittest.TestCase): self.init_client(place, selected_port) self.run_local(place) - self.assertTrue(numpy.allclose(self.local_out, self.dist_out)) + np.testing.assert_allclose(self.local_out, self.dist_out, rtol=1e-05) os.kill(p.pid, signal.SIGINT) p.join() diff --git a/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py b/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py index 7950c278422..24d15769492 100644 --- a/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py +++ b/python/paddle/fluid/tests/unittests/test_distribute_fpn_proposals_op.py @@ -220,7 +220,7 @@ class TestDistributeFpnProposalsAPI(unittest.TestCase): output_dy_np.append(output_np) for res_stat, res_dy in zip(output_stat_np, output_dy_np): - self.assertTrue(np.allclose(res_stat, res_dy)) + np.testing.assert_allclose(res_stat, res_dy, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dot_op.py b/python/paddle/fluid/tests/unittests/test_dot_op.py index 1ce352251c1..32418ec90b2 100644 --- a/python/paddle/fluid/tests/unittests/test_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_dot_op.py @@ -131,8 +131,9 @@ class TestDygraph(unittest.TestCase): with fluid.dygraph.guard(): x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32)) y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32)) - self.assertTrue( - np.allclose(paddle.dot(x1, y1).numpy(), np.array([17]))) + np.testing.assert_allclose(paddle.dot(x1, y1).numpy(), + np.array([17]), + rtol=1e-05) x1 = fluid.dygraph.to_variable( np.array([[1, 3], [3, 5]]).astype(np.float32)) diff --git a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py index c696863c612..9b8520dfdbc 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py @@ -122,7 +122,7 @@ class TestDropoutNdAPI(unittest.TestCase): input = paddle.to_tensor(in_np) res1 = dropout_nd(x=input, p=0., axis=[0, 1]) res2 = dropout_nd(x=input, p=0.5, axis=[0, 1]) - self.assertTrue(np.allclose(res1.numpy(), in_np)) + np.testing.assert_allclose(res1.numpy(), in_np, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index a5f33288362..094b157b5e4 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -345,7 +345,7 @@ class 
TestDropoutOpWithSeedOnCPUPlace(unittest.TestCase): feed={}, fetch_list=[x_out_var.name, mask_var.name]) x_in_np = np.ones([40, 40]).astype("float32") - self.assertTrue(np.allclose(x_out, x_in_np)) + np.testing.assert_allclose(x_out, x_in_np, rtol=1e-05) class TestDropoutOpError(unittest.TestCase): @@ -451,11 +451,11 @@ class TestDropoutFAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res]) - self.assertTrue(np.allclose(fetches[0], res_np)) + np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) fetches2 = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res10]) - self.assertTrue(np.allclose(fetches2[0], res_np2)) + np.testing.assert_allclose(fetches2[0], res_np2, rtol=1e-05) fetches3 = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res13]) @@ -536,8 +536,8 @@ class TestDropoutFAPI(unittest.TestCase): res12 ] for res in res_list: - self.assertTrue(np.allclose(res.numpy(), res_np)) - self.assertTrue(np.allclose(res10.numpy(), res_np2)) + np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05) + np.testing.assert_allclose(res10.numpy(), res_np2, rtol=1e-05) class TestDropoutFAPIError(unittest.TestCase): @@ -636,7 +636,9 @@ class TestDropoutCAPI(unittest.TestCase): m = paddle.nn.Dropout(p=0.) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), + result_np, + rtol=1e-05) class TestDropout2DFAPI(unittest.TestCase): @@ -670,7 +672,7 @@ class TestDropout2DFAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res]) - self.assertTrue(np.allclose(fetches[0], res_np)) + np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) def test_static(self): for place in self.places: @@ -694,7 +696,7 @@ class TestDropout2DFAPI(unittest.TestCase): res_list = [res1, res2] for res in res_list: - self.assertTrue(np.allclose(res.numpy(), res_np)) + np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05) class TestDropout2DFAPIError(unittest.TestCase): @@ -734,7 +736,9 @@ class TestDropout2DCAPI(unittest.TestCase): m = paddle.nn.Dropout2D(p=0.) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), + result_np, + rtol=1e-05) class TestDropout3DFAPI(unittest.TestCase): @@ -768,7 +772,7 @@ class TestDropout3DFAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res]) - self.assertTrue(np.allclose(fetches[0], res_np)) + np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) def test_static(self): for place in self.places: @@ -792,7 +796,7 @@ class TestDropout3DFAPI(unittest.TestCase): res_list = [res1, res2] for res in res_list: - self.assertTrue(np.allclose(res.numpy(), res_np)) + np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05) class TestDropout3DFAPIError(unittest.TestCase): @@ -832,7 +836,9 @@ class TestDropout3DCAPI(unittest.TestCase): m = paddle.nn.Dropout3D(p=0.) 
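[Reviewer note] On the rtol=1e-05 pinned throughout these rewrites: the two APIs have different defaults. np.allclose(a, b) uses rtol=1e-05 and atol=1e-08 and returns a bool; np.testing.assert_allclose(actual, desired) defaults to the stricter rtol=1e-07 with atol=0 and raises an AssertionError carrying a mismatch report. Passing rtol=1e-05 explicitly keeps each rewritten assertion at roughly its old tolerance; only atol differs slightly (np.allclose adds atol=1e-08 by default, assert_allclose uses atol=0 unless given), which is why some hunks also pass atol explicitly. A small sketch with hypothetical values:

    import numpy as np

    a = np.float64(1.0)
    b = np.float64(1.0 + 3e-06)  # relative error 3e-06: inside 1e-05, outside 1e-07

    np.allclose(a, b)                             # True under the np.allclose defaults
    # np.testing.assert_allclose(a, b)            # would raise under the default rtol=1e-07
    np.testing.assert_allclose(a, b, rtol=1e-05)  # preserves the old pass/fail behaviour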
m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), + result_np, + rtol=1e-05) class TestAlphaDropoutFAPI(unittest.TestCase): @@ -862,11 +868,11 @@ class TestAlphaDropoutFAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res]) - self.assertTrue(np.allclose(fetches[0], res_np)) + np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) fetches = exe.run(fluid.default_main_program(), feed={"input": in_np}, fetch_list=[res3]) - self.assertTrue(np.allclose(fetches[0], res_np3)) + np.testing.assert_allclose(fetches[0], res_np3, rtol=1e-05) def test_static(self): for place in self.places: @@ -888,8 +894,8 @@ class TestAlphaDropoutFAPI(unittest.TestCase): res_list = [res1, res2] for res in res_list: - self.assertTrue(np.allclose(res.numpy(), res_np)) - self.assertTrue(np.allclose(res3.numpy(), res_np3)) + np.testing.assert_allclose(res.numpy(), res_np, rtol=1e-05) + np.testing.assert_allclose(res3.numpy(), res_np3, rtol=1e-05) class TestAlphaDropoutFAPIError(unittest.TestCase): @@ -944,7 +950,9 @@ class TestAlphaDropoutCAPI(unittest.TestCase): m = paddle.nn.AlphaDropout(p=0.) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), + result_np, + rtol=1e-05) class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): @@ -982,7 +990,7 @@ class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase): out1, out2 = exe.run(static.default_main_program(), feed={"input": in_np}, fetch_list=res_list) - self.assertTrue(np.allclose(out1, out2)) + np.testing.assert_allclose(out1, out2, rtol=1e-05) def test_static(self): for place in self.places: @@ -1043,10 +1051,10 @@ class TestDropoutBackward(unittest.TestCase): "upscale_in_train") out.backward() - self.assertTrue( - np.allclose(input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), - prob))) + np.testing.assert_allclose(input.gradient(), + self.cal_grad_upscale_train( + mask.numpy(), prob), + rtol=1e-05) def test_backward_upscale_train_eager(self): for place in self.places: @@ -1059,10 +1067,10 @@ class TestDropoutBackward(unittest.TestCase): input, None, 0.5, False, "upscale_in_train", 0, False) out.backward() - self.assertTrue( - np.allclose( - input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob))) + np.testing.assert_allclose(input.gradient(), + self.cal_grad_upscale_train( + mask.numpy(), prob), + rtol=1e-05) def test_backward_upscale_train_2(self): _enable_legacy_dygraph() @@ -1077,10 +1085,10 @@ class TestDropoutBackward(unittest.TestCase): "upscale_in_train") out.backward() - self.assertTrue( - np.allclose(input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), - prob))) + np.testing.assert_allclose(input.gradient(), + self.cal_grad_upscale_train( + mask.numpy(), prob), + rtol=1e-05) def test_backward_upscale_train_2_eager(self): for place in self.places: @@ -1095,10 +1103,10 @@ class TestDropoutBackward(unittest.TestCase): out.backward() - self.assertTrue( - np.allclose( - input.gradient(), - self.cal_grad_upscale_train(mask.numpy(), prob))) + np.testing.assert_allclose(input.gradient(), + self.cal_grad_upscale_train( + mask.numpy(), prob), + rtol=1e-05) class TestDropOutWithProbTensor(unittest.TestCase): @@ -1169,7 +1177,7 @@ class TestRandomValue(unittest.TestCase): 0.6914956, 0.5294584, 0.19032137, 0.6996228, 0.3338527, 0.8442094, 0.96965003, 1.1726775, 0., 0.28037727 ] - 
self.assertTrue(np.allclose(out[10, 100, 500:510], expect)) + np.testing.assert_allclose(out[10, 100, 500:510], expect, rtol=1e-05) x = paddle.rand([32, 1024, 1024], dtype='float64') out = paddle.nn.functional.dropout(x).numpy() @@ -1182,7 +1190,7 @@ class TestRandomValue(unittest.TestCase): 1.28587354, 0.15563703, 0., 0.28799703, 0., 0., 0., 0.54964, 0.51355682, 0.33818988 ] - self.assertTrue(np.allclose(out[20, 100, 500:510], expect)) + np.testing.assert_allclose(out[20, 100, 500:510], expect, rtol=1e-05) x = paddle.ones([32, 1024, 1024], dtype='float16') out = paddle.nn.functional.dropout(x, 0.75).numpy() @@ -1191,7 +1199,7 @@ class TestRandomValue(unittest.TestCase): self.assertEqual(np.sum(index1), 4291190105) self.assertEqual(np.sum(index2), 4292243807) expect = [0., 0., 0., 0., 0., 0., 0., 0., 4., 4.] - self.assertTrue(np.allclose(out[0, 100, 500:510], expect)) + np.testing.assert_allclose(out[0, 100, 500:510], expect, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index 814ef31102f..9a61162bac0 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -201,12 +201,16 @@ class TestDygraphMultiForward(unittest.TestCase): static_out = out[0] - self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all())) + np.testing.assert_allclose(dy_x_data.all(), + static_x_data.all(), + rtol=1e-05) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, dy_param_init_value[key])) + np.testing.assert_allclose(value, + dy_param_init_value[key], + rtol=1e-05) - self.assertTrue(np.allclose(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py index 9ca53d9a925..03ca8cdaf46 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py @@ -95,8 +95,10 @@ class TestDygraphSpectralNorm(unittest.TestCase): self.eps) for expect, actual in zip(expect_output, self.actual_outputs): - self.assertTrue( - np.allclose(np.array(actual), np.array(expect), atol=0.001)) + np.testing.assert_allclose(np.array(actual), + np.array(expect), + rtol=1e-05, + atol=0.001) class TestDygraphWeightNormCase(TestDygraphSpectralNorm): diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py index 6ca02794a8a..350182c2cb2 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import collections from functools import reduce import paddle @@ -40,8 +40,8 @@ class TestDygraphWeightNorm(unittest.TestCase): for desc in self.data_desc: data_name = desc[0] data_shape = desc[1] - data_value = numpy.random.random(size=[self.batch_size] + - data_shape).astype('float32') + data_value = np.random.random(size=[self.batch_size] + + data_shape).astype('float32') self.data[data_name] = data_value def norm_except_dim(self, w, dim=None): @@ -49,21 +49,21 @@ class TestDygraphWeightNorm(unittest.TestCase): ndims = 
len(shape) shape_numel = reduce(lambda x, y: x * y, shape) if dim == -1: - return numpy.linalg.norm(w, axis=None, keepdims=True) + return np.linalg.norm(w, axis=None, keepdims=True) elif dim == 0: tile_shape = list(w.shape) tile_shape[0] = 1 - w_matrix = numpy.reshape(w, (shape[0], shape_numel // shape[0])) - return numpy.linalg.norm(w_matrix, axis=1, keepdims=True) + w_matrix = np.reshape(w, (shape[0], shape_numel // shape[0])) + return np.linalg.norm(w_matrix, axis=1, keepdims=True) elif dim == (ndims - 1): - w_matrix = numpy.reshape(w, (shape_numel // shape[-1], shape[-1])) - return numpy.linalg.norm(w_matrix, axis=0, keepdims=True) + w_matrix = np.reshape(w, (shape_numel // shape[-1], shape[-1])) + return np.linalg.norm(w_matrix, axis=0, keepdims=True) else: perm = list(range(ndims)) perm_ori = list(range(ndims)) perm[0] = dim perm[dim] = 0 - p_transposed = numpy.transpose(w, perm) + p_transposed = np.transpose(w, perm) return self.norm_except_dim(p_transposed, 0) def weight_normalize(self, w, dim=None): @@ -75,41 +75,40 @@ class TestDygraphWeightNorm(unittest.TestCase): g_mul = g if dim == -1: - v_norm = v / (numpy.linalg.norm(v, axis=None, keepdims=True)) + v_norm = v / (np.linalg.norm(v, axis=None, keepdims=True)) elif dim == 0: - w_matrix = numpy.reshape(w, (shape[0], shape_numel // shape[0])) - v_norm = v / numpy.linalg.norm(w_matrix, axis=1) - v_norm = numpy.reshape(v_norm, shape) - g = numpy.squeeze(g, axis=1) + w_matrix = np.reshape(w, (shape[0], shape_numel // shape[0])) + v_norm = v / np.linalg.norm(w_matrix, axis=1) + v_norm = np.reshape(v_norm, shape) + g = np.squeeze(g, axis=1) elif dim == (ndims - 1): - w_matrix = numpy.reshape(w, (shape_numel // shape[-1], shape[-1])) - v_norm = v / numpy.linalg.norm(w_matrix, axis=0, keepdims=True) - v_norm = numpy.reshape(v_norm, shape) + w_matrix = np.reshape(w, (shape_numel // shape[-1], shape[-1])) + v_norm = v / np.linalg.norm(w_matrix, axis=0, keepdims=True) + v_norm = np.reshape(v_norm, shape) else: perm = list(range(ndims)) perm[0] = dim perm[dim] = 0 - p_transposed = numpy.transpose(v, perm) + p_transposed = np.transpose(v, perm) transposed_shape = p_transposed.shape transposed_shape_numel = reduce(lambda x, y: x * y, transposed_shape) - p_matrix = numpy.reshape( + p_matrix = np.reshape( p_transposed, (p_transposed.shape[0], transposed_shape_numel // p_transposed.shape[0])) - v_norm = v / numpy.expand_dims(numpy.expand_dims( - numpy.linalg.norm(p_matrix, axis=1, keepdims=True), axis=0), - axis=(ndims - 1)) - v_norm = numpy.reshape(v_norm, transposed_shape) - v_norm = numpy.transpose(v_norm, perm) - g = numpy.squeeze(g, axis=1) + v_norm = v / np.expand_dims(np.expand_dims( + np.linalg.norm(p_matrix, axis=1, keepdims=True), axis=0), + axis=(ndims - 1)) + v_norm = np.reshape(v_norm, transposed_shape) + v_norm = np.transpose(v_norm, perm) + g = np.squeeze(g, axis=1) if dim == 1: eaxis = 2 elif dim == 2: eaxis = 1 - g_mul = numpy.expand_dims(numpy.expand_dims(numpy.expand_dims( - g, axis=0), - axis=eaxis), - axis=(ndims - 1)) + g_mul = np.expand_dims(np.expand_dims(np.expand_dims(g, axis=0), + axis=eaxis), + axis=(ndims - 1)) w = g_mul * v_norm return g, v @@ -133,8 +132,7 @@ class TestDygraphWeightNorm(unittest.TestCase): expect_output = self.weight_normalize(before_weight, self.dim) for expect, actual in zip(expect_output, self.actual_outputs): - self.assertTrue( - numpy.allclose(numpy.array(actual), expect, atol=0.001)) + self.assertTrue(np.allclose(np.array(actual), expect, atol=0.001)) class 
TestDygraphWeightNormCase1(TestDygraphWeightNorm): @@ -186,10 +184,10 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase): wn = weight_norm(linear, dim=self.dim) rwn = remove_weight_norm(linear) after_weight = linear.weight - self.assertTrue( - numpy.allclose(before_weight.numpy(), - after_weight.numpy(), - atol=0.001)) + np.testing.assert_allclose(before_weight.numpy(), + after_weight.numpy(), + rtol=1e-05, + atol=0.001) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index e81da693b7f..e98cabdbd64 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -14,7 +14,7 @@ from __future__ import print_function -import numpy +import numpy as np import random import collections import paddle @@ -26,7 +26,7 @@ from decorator_helper import * class Memory(object): def __init__(self, shape, dtype='float32'): - self.ex = numpy.zeros(shape=shape, dtype=dtype) + self.ex = np.zeros(shape=shape, dtype=dtype) self.cur = None def update(self, val): @@ -42,7 +42,7 @@ class Memory(object): self.next() def reset(self): - self.ex = numpy.zeros(shape=self.ex.shape, dtype=self.ex.dtype) + self.ex = np.zeros(shape=self.ex.shape, dtype=self.ex.dtype) self.cur = None @@ -74,7 +74,7 @@ class BaseRNN(object): idtype = ins[iname].get('dtype', 'float32') lst = [] for _ in range(seq_len): - lst.append(numpy.random.random(size=ishape).astype(idtype)) + lst.append(np.random.random(size=ishape).astype(idtype)) self.inputs[iname].append(lst) self.mems = dict() @@ -87,7 +87,7 @@ class BaseRNN(object): for pname in params: pshape = params[pname].get('shape', None) pdtype = params[pname].get('dtype', 'float32') - self.params[pname] = numpy.random.random(size=pshape).astype(pdtype) + self.params[pname] = np.random.random(size=pshape).astype(pdtype) self.outputs = dict() @@ -135,7 +135,7 @@ class BaseRNN(object): retv[out].append(self.outputs[out].last()) for out in retv: - retv[out] = numpy.array(retv[out]) + retv[out] = np.array(retv[out]) return retv def to_feed(self, place): @@ -150,7 +150,7 @@ class BaseRNN(object): np_flatten.extend(self.inputs[iname][seq_id]) t = fluid.Tensor() - t.set(numpy.array(np_flatten), place) + t.set(np.array(np_flatten), place) t.set_recursive_sequence_lengths([lod]) feed_dict[iname] = t @@ -163,7 +163,7 @@ class BaseRNN(object): if len(p.shape) != 2: raise ValueError("Not support get numeric gradient of an parameter," " which is not matrix") - g = numpy.zeros(shape=p.shape, dtype=p.dtype) + g = np.zeros(shape=p.shape, dtype=p.dtype) for i in range(p.shape[0]): for j in range(p.shape[1]): @@ -186,7 +186,7 @@ class BaseRNN(object): for seq in ipt: seq_grad = [] for item in seq: - item_grad = numpy.zeros(shape=item.shape, dtype=item.dtype) + item_grad = np.zeros(shape=item.shape, dtype=item.dtype) if len(item.shape) != 1: raise ValueError("Not support") @@ -205,13 +205,13 @@ class BaseRNN(object): return grad for i in range(len(grad)): - grad[i] = numpy.concatenate(grad[i]) - grad = numpy.concatenate(grad) + grad[i] = np.concatenate(grad[i]) + grad = np.concatenate(grad) return grad def _exe_mean_out_(self): outs = self.exe() - return numpy.array([o.mean() for o in outs.values()]).mean() + return np.array([o.mean() for o in outs.values()]).mean() class SeedFixedTestCase(unittest.TestCase): @@ -219,16 +219,16 @@ class SeedFixedTestCase(unittest.TestCase): @classmethod def 
setUpClass(cls): """Fix random seeds to remove randomness from tests""" - cls._np_rand_state = numpy.random.get_state() + cls._np_rand_state = np.random.get_state() cls._py_rand_state = random.getstate() - numpy.random.seed(123) + np.random.seed(123) random.seed(124) @classmethod def tearDownClass(cls): """Restore random seeds""" - numpy.random.set_state(cls._np_rand_state) + np.random.set_state(cls._np_rand_state) random.setstate(cls._py_rand_state) @@ -253,7 +253,7 @@ class TestSimpleMul(SeedFixedTestCase): }, [base.OUT_NAME]) def step(self, X, W, Out): - Out.out(numpy.matmul(X, W)) + Out.out(np.matmul(X, W)) # Test many times in local to ensure the random seed cannot breaks CI # @many_times(10) @@ -284,7 +284,7 @@ class TestSimpleMul(SeedFixedTestCase): exe = fluid.Executor(cpu) out, w_g, i_g = list( map( - numpy.array, + np.array, exe.run(feed=py_rnn.to_feed(cpu), fetch_list=[ out, self.PARAM_NAME + "@GRAD", @@ -292,13 +292,13 @@ class TestSimpleMul(SeedFixedTestCase): ], return_numpy=False))) out_by_python = py_rnn.exe()[self.OUT_NAME] - self.assertTrue(numpy.allclose(out, out_by_python)) + np.testing.assert_allclose(out, out_by_python, rtol=1e-05) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) - self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.05)) + np.testing.assert_allclose(w_g_num, w_g, rtol=0.05) i_g_num = py_rnn.get_numeric_gradient_of_input( input_name=self.DATA_NAME) i_g_num = i_g_num.reshape(i_g.shape) - self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.05)) + np.testing.assert_allclose(i_g_num, i_g, rtol=0.05) class TestSimpleMulWithMemory(SeedFixedTestCase): @@ -327,7 +327,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): }, ['Out']) def step(self, X, Mem, W, Out): - o = numpy.matmul(X, W) + o = np.matmul(X, W) assert isinstance(Mem, Memory) o += Mem.ex Mem.update(o) @@ -366,7 +366,7 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): feed = py_rnn.to_feed(cpu) last_np, w_g, i_g = list( map( - numpy.array, + np.array, exe.run(feed=feed, fetch_list=[ last, self.PARAM_NAME + "@GRAD", @@ -375,15 +375,15 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): return_numpy=False))) last_by_py, = list(py_rnn.exe().values()) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) - self.assertTrue(numpy.allclose(last_np, last_by_py)) + np.testing.assert_allclose(last_np, last_by_py, rtol=1e-05) - self.assertTrue(numpy.allclose(w_g_num, w_g, rtol=0.1)) + np.testing.assert_allclose(w_g_num, w_g, rtol=0.1) i_g_num = py_rnn.get_numeric_gradient_of_input(self.DATA_NAME) i_g_num = i_g_num.reshape(i_g.shape) # Since this RNN has many float add. The number could be not stable. 
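For context on the loosened tolerances in this hunk: np.testing.assert_allclose(actual, desired, rtol, atol) passes only when every element satisfies |actual - desired| <= atol + rtol * |desired|, so the allowed error scales with the magnitude of the reference value. A tiny standalone sketch of that behaviour (the arrays are illustrative, not taken from these tests):

import numpy as np

desired = np.array([1.00, 2.00])
actual = np.array([1.05, 1.92])  # each element within 10% of desired
np.testing.assert_allclose(actual, desired, rtol=0.1)  # passes silently
# With rtol=0.01 this comparison would raise AssertionError.

This is why the accumulated floating-point error described in the comment above only needs the relative tolerance of 0.1 used below, rather than an absolute tolerance.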
# rtol = 0.1 - self.assertTrue(numpy.allclose(i_g_num, i_g, rtol=0.1)) + np.testing.assert_allclose(i_g_num, i_g, rtol=0.1) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index 1daa68aa015..9d6bcfbf8e5 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -182,8 +182,8 @@ class TestDyRnnStaticInput(unittest.TestCase): expected_outs, expected_lods = self.get_expected_static_step_outs() for i in range(self._max_sequence_len): step_out, lod = self.fetch_value(static_step_outs[i]) - self.assertTrue(np.allclose(step_out, expected_outs[i])) - self.assertTrue(np.allclose(lod, expected_lods[i])) + np.testing.assert_allclose(step_out, expected_outs[i], rtol=1e-05) + np.testing.assert_allclose(lod, expected_lods[i], rtol=1e-05) def test_network_gradient(self): static_input_grad, loss = self.build_graph() @@ -205,10 +205,13 @@ class TestDyRnnStaticInput(unittest.TestCase): y_neg = self.fetch_value(loss)[0][0] self.static_input_tensor._set_float_element(i, origin) numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2 - self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001)) - self.assertTrue( - np.allclose(actual_lod, - self.static_input_tensor.recursive_sequence_lengths())) + np.testing.assert_allclose(actual_gradients, + numeric_gradients, + rtol=0.001) + np.testing.assert_allclose( + actual_lod, + self.static_input_tensor.recursive_sequence_lengths(), + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py index 180e1229514..965b46ee843 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py @@ -624,8 +624,7 @@ class PaddingRNNTestBase(unittest.TestCase): config = RNNConfig('test', 'static') with fluid.scope_guard(fluid.Scope()): static_rnn_ppl = self.train(config, parallel, use_program_cache) - self.assertTrue( - np.isclose(padding_rnn_ppl, static_rnn_ppl, rtol=0.001).all()) + np.testing.assert_allclose(padding_rnn_ppl, static_rnn_ppl, rtol=0.001) class EagerDeletionPaddingRNNTest(PaddingRNNTestBase): diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py index 195278253e8..a468de7a282 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py @@ -212,17 +212,20 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): for idx, name in enumerate(self.data_field): self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape) - self.assertTrue( - np.isclose(num_grad[idx], ana_grad[idx], rtol=rtol).all(), - "num_grad (" + name + ") has diff at " + str(self.place) + - "\nExpect " + str(num_grad[idx]) + "\n" + "But Got" + - str(ana_grad[idx]) + " in class " + self.__class__.__name__) + np.testing.assert_allclose( + num_grad[idx], + ana_grad[idx], + rtol=rtol, + err_msg='num_grad (' + name + ') has diff at ' + + str(self.place) + '\nExpect ' + str(num_grad[idx]) + '\n' + + 'But Got' + str(ana_grad[idx]) + ' in class ' + + self.__class__.__name__) def check_forward(self): pd_output = self.forward() py_output = 
self.py_rnn.forward() self.assertEqual(pd_output.shape, py_output.shape) - self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all()) + np.testing.assert_allclose(pd_output, py_output, rtol=0.01) def get_numerical_gradient(self, delta=0.005): dloss_dout = 1.0 @@ -686,9 +689,8 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( py_output = self.py_rnn.forward() self.assertEqual(forward_only_output.shape, py_output.shape) self.assertEqual(pd_output.shape, py_output.shape) - self.assertTrue( - np.isclose(forward_only_output, py_output, rtol=0.01).all) - self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all()) + np.testing.assert_allclose(forward_only_output, py_output, rtol=0.01) + np.testing.assert_allclose(pd_output, py_output, rtol=0.01) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py index 26702d682d1..772a8713738 100644 --- a/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py +++ b/python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py @@ -53,7 +53,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): out = paddle.mm(input, mat2) out_arr = out.numpy() out_arr_expected = np.matmul(np_input, np_mat2) - self.assertTrue(np.allclose(out_arr, out_arr_expected)) + np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05) def test_sigmoid(self): with _test_eager_guard(): @@ -64,7 +64,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase): out_arr_expected = np.array( [0.40131234, 0.450166, 0.52497919, 0.57444252]).astype('float32') - self.assertTrue(np.allclose(out_arr, out_arr_expected)) + np.testing.assert_allclose(out_arr, out_arr_expected, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_eig_op.py b/python/paddle/fluid/tests/unittests/test_eig_op.py index 4e8f69a6bda..72326373070 100644 --- a/python/paddle/fluid/tests/unittests/test_eig_op.py +++ b/python/paddle/fluid/tests/unittests/test_eig_op.py @@ -95,14 +95,20 @@ class TestEigOp(OpTest): np.array([np.abs(expect_out_w[i].imag) for i in range(length_w)])) for i in range(length_w): - self.assertTrue( - np.allclose(act_w_real[i], exp_w_real[i], 1e-6, 1e-5), - "The eigenvalues real part have diff: \nExpected " + - str(act_w_real[i]) + "\n" + "But got: " + str(exp_w_real[i])) - self.assertTrue( - np.allclose(act_w_imag[i], exp_w_imag[i], 1e-6, 1e-5), - "The eigenvalues image part have diff: \nExpected " + - str(act_w_imag[i]) + "\n" + "But got: " + str(exp_w_imag[i])) + np.testing.assert_allclose( + act_w_real[i], + exp_w_real[i], + rtol=1e-06, + atol=1e-05, + err_msg='The eigenvalues real part have diff: \nExpected ' + + str(act_w_real[i]) + '\n' + 'But got: ' + str(exp_w_real[i])) + np.testing.assert_allclose( + act_w_imag[i], + exp_w_imag[i], + rtol=1e-06, + atol=1e-05, + err_msg='The eigenvalues imaginary part have diff: \nExpected ' + + str(act_w_imag[i]) + '\n' + 'But got: ' + str(exp_w_imag[i])) length_v = len(expect_out_v) act_v_real = np.sort( @@ -115,14 +121,20 @@ class TestEigOp(OpTest): np.array([np.abs(expect_out_v[i].imag) for i in range(length_v)])) for i in range(length_v): - self.assertTrue( - np.allclose(act_v_real[i], exp_v_real[i], 1e-6, 1e-5), - "The eigenvectors real part have diff: \nExpected " + - str(act_v_real[i]) + "\n" + "But got: " + str(exp_v_real[i])) - self.assertTrue( - np.allclose(act_v_imag[i], exp_v_imag[i], 1e-6, 1e-5), - "The eigenvectors image part have diff: \nExpected " + -
str(act_v_imag[i]) + "\n" + "But got: " + str(exp_v_imag[i])) + np.testing.assert_allclose( + act_v_real[i], + exp_v_real[i], + rtol=1e-06, + atol=1e-05, + err_msg='The eigenvectors real part have diff: \nExpected ' + + str(act_v_real[i]) + '\n' + 'But got: ' + str(exp_v_real[i])) + np.testing.assert_allclose( + act_v_imag[i], + exp_v_imag[i], + rtol=1e-06, + atol=1e-05, + err_msg='The eigenvectors imaginary part have diff: \nExpected ' + + str(act_v_imag[i]) + '\n' + 'But got: ' + str(exp_v_imag[i])) def set_dtype(self): self.dtype = np.complex64 @@ -216,14 +228,20 @@ class TestEigStatic(TestEigOp): fetch_val, fetch_vec = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[act_val, act_vec]) - self.assertTrue( - np.allclose(expect_val, fetch_val, 1e-6, - 1e-6), "The eigen values have diff: \nExpected " + - str(expect_val) + "\n" + "But got: " + str(fetch_val)) - self.assertTrue( - np.allclose(np.abs(expect_vec), np.abs(fetch_vec), 1e-6, - 1e-6), "The eigen vectors have diff: \nExpected " + - str(np.abs(expect_vec)) + "\n" + "But got: " + + np.testing.assert_allclose( + expect_val, + fetch_val, + rtol=1e-06, + atol=1e-06, + err_msg='The eigenvalues have diff: \nExpected ' + + str(expect_val) + '\n' + 'But got: ' + str(fetch_val)) + np.testing.assert_allclose( + np.abs(expect_vec), + np.abs(fetch_vec), + rtol=1e-06, + atol=1e-06, + err_msg='The eigenvectors have diff: \nExpected ' + + str(np.abs(expect_vec)) + '\n' + 'But got: ' + str(np.abs(fetch_vec))) @@ -239,14 +257,20 @@ class TestEigDyGraph(unittest.TestCase): input_tensor = paddle.to_tensor(input_np) fetch_val, fetch_vec = paddle.linalg.eig(input_tensor) - self.assertTrue( - np.allclose(expect_val, fetch_val.numpy(), 1e-6, - 1e-6), "The eigen values have diff: \nExpected " + - str(expect_val) + "\n" + "But got: " + str(fetch_val)) - self.assertTrue( - np.allclose(np.abs(expect_vec), np.abs(fetch_vec.numpy()), 1e-6, - 1e-6), "The eigen vectors have diff: \nExpected " + - str(np.abs(expect_vec)) + "\n" + "But got: " + + np.testing.assert_allclose( + expect_val, + fetch_val.numpy(), + rtol=1e-06, + atol=1e-06, + err_msg='The eigenvalues have diff: \nExpected ' + + str(expect_val) + '\n' + 'But got: ' + str(fetch_val)) + np.testing.assert_allclose( + np.abs(expect_vec), + np.abs(fetch_vec.numpy()), + rtol=1e-06, + atol=1e-06, + err_msg='The eigenvectors have diff: \nExpected ' + + str(np.abs(expect_vec)) + '\n' + 'But got: ' + str(np.abs(fetch_vec.numpy()))) def test_check_grad(self): @@ -267,10 +291,13 @@ class TestEigDyGraph(unittest.TestCase): w, v = paddle.linalg.eig(x) (w.sum() + v.sum()).backward() - self.assertTrue( - np.allclose(np.abs(x.grad.numpy()), np.abs(grad_x), 1e-5, 1e-5), - "The grad x have diff: \nExpected " + str(np.abs(grad_x)) + "\n" + - "But got: " + str(np.abs(x.grad.numpy()))) + np.testing.assert_allclose(np.abs(x.grad.numpy()), + np.abs(grad_x), + rtol=1e-05, + atol=1e-05, + err_msg='The grad x have diff: \nExpected ' + + str(np.abs(grad_x)) + '\n' + 'But got: ' + + str(np.abs(x.grad.numpy()))) class TestEigWrongDimsError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_einsum.py b/python/paddle/fluid/tests/unittests/test_einsum.py index 9ba4869786c..9fd54f860a1 100644 --- a/python/paddle/fluid/tests/unittests/test_einsum.py +++ b/python/paddle/fluid/tests/unittests/test_einsum.py @@ -135,10 +135,13 @@ class TestEinsum(unittest.TestCase): def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): error_msg = 'Output has diff at place:{}.
\nExpect: {} \nBut Got: {} in class {}' - self.assertTrue( - np.allclose(actual, expect, rtol=rtol, atol=atol), - error_msg.format(paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose(actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, + self.__class__.__name__)) def setUp(self): self.sample = {"paradigm": "i->", "data": ["x"]} diff --git a/python/paddle/fluid/tests/unittests/test_einsum_v2.py b/python/paddle/fluid/tests/unittests/test_einsum_v2.py index e97e089252a..b8ceff78d27 100644 --- a/python/paddle/fluid/tests/unittests/test_einsum_v2.py +++ b/python/paddle/fluid/tests/unittests/test_einsum_v2.py @@ -150,10 +150,13 @@ class TestEinsum(unittest.TestCase): def check_output_equal(self, actual, expect, rtol=1.e-5, atol=1.e-8): error_msg = 'Output has diff at place:{}. \nExpect: {} \nBut Got: {} in class {}' - self.assertTrue( - np.allclose(actual, expect, rtol=rtol, atol=atol), - error_msg.format(paddle.get_device(), expect, actual, - self.__class__.__name__)) + np.testing.assert_allclose(actual, + expect, + rtol=rtol, + atol=atol, + err_msg=error_msg.format( + paddle.get_device(), expect, actual, + self.__class__.__name__)) def setUp(self): self.sample = {"paradigm": "i->", "data": ["x"]} diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py index 6c300ce24d3..9a5ce244802 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py @@ -23,7 +23,11 @@ import paddle.fluid as fluid class TestElementWiseAddOp(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.testing.assert_allclose(np.array(tensor), + np_array, + rtol=1e-05, + atol=atol, + err_msg=msg) def check_forward_backward(self): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py index 73d110ce132..ad7b0c80508 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py @@ -64,19 +64,19 @@ class TestHeavisideBroadcast(unittest.TestCase): res = paddle.heaviside(self.tensor_1, self.tensor_2) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) res = paddle.heaviside(self.tensor_2, self.tensor_3) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.heaviside(self.tensor_2, self.tensor_4) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.heaviside(self.tensor_4, self.tensor_5) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) class TestHeavisideAPI_float64(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py index 25a0c0a0652..7ffe3d173c2 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_elementwise_min_op.py @@ -206,15 +206,9 @@ class TestElementwiseMinOpFP16(unittest.TestCase): z_1, x_g_1, y_g_1 = self.get_out_and_grad(x_np, y_np, axis, place, False) z_2, x_g_2, y_g_2 = self.get_out_and_grad(x_np, y_np, axis, place, True) - np.testing.assert_array_equal(z_1, - z_2, - err_msg='{} vs {}'.format(z_1, z_2)) - np.testing.assert_array_equal(x_g_1, - x_g_2, - err_msg='{} vs {}'.format(x_g_1, x_g_2)) - np.testing.assert_array_equal(y_g_1, - y_g_2, - err_msg='{} vs {}'.format(y_g_1, y_g_2)) + np.testing.assert_array_equal(z_1, z_2) + np.testing.assert_array_equal(x_g_1, x_g_2) + np.testing.assert_array_equal(y_g_1, y_g_2) def test_main(self): self.check_main((13, 17), (13, 17)) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py index 436ce466be3..f51d1012243 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py @@ -122,7 +122,7 @@ class TestRemainderOp(unittest.TestCase): y = paddle.to_tensor(np_y) z = x % y z_expected = np.array([-0.9, 1.5, 1.3, -1.1]) - self.assertEqual(np.allclose(z_expected, z.numpy()), True) + np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05) np_x = np.array([-3, 11, -2, 3]) np_y = np.array([-1, 2, 3, -2]) @@ -130,7 +130,7 @@ class TestRemainderOp(unittest.TestCase): y = paddle.to_tensor(np_y, dtype="int64") z = x % y z_expected = np.array([0, 1, 1, -1]) - self.assertEqual(np.allclose(z_expected, z.numpy()), True) + np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_ema.py b/python/paddle/fluid/tests/unittests/test_ema.py index dd3472d31c9..a48ce090ca6 100644 --- a/python/paddle/fluid/tests/unittests/test_ema.py +++ b/python/paddle/fluid/tests/unittests/test_ema.py @@ -80,7 +80,7 @@ class TestExponentialMovingAverage(unittest.TestCase): manu_ema = self._ema_decay * manu_ema + ( 1 - self._ema_decay) * param manu_ema = manu_ema / (1.0 - self._ema_decay**len(params)) - self.assertTrue(np.allclose(manu_ema, final_ema)) + np.testing.assert_allclose(manu_ema, final_ema, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_ema_fleet.py b/python/paddle/fluid/tests/unittests/test_ema_fleet.py index c08f811a178..5a361416ab9 100644 --- a/python/paddle/fluid/tests/unittests/test_ema_fleet.py +++ b/python/paddle/fluid/tests/unittests/test_ema_fleet.py @@ -90,7 +90,7 @@ class TestFleetStaticEMA(unittest.TestCase): manu_ema = self._ema_decay * manu_ema + ( 1 - self._ema_decay) * param manu_ema = manu_ema / (1.0 - self._ema_decay**len(params)) - self.assertTrue(np.allclose(manu_ema, final_ema)) + np.testing.assert_allclose(manu_ema, final_ema, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_erf_op.py b/python/paddle/fluid/tests/unittests/test_erf_op.py index c7d7b3abc9a..8f3184b656e 100644 --- a/python/paddle/fluid/tests/unittests/test_erf_op.py +++ b/python/paddle/fluid/tests/unittests/test_erf_op.py @@ -54,7 +54,7 @@ class TestErfLayer(unittest.TestCase): x_var = dg.to_variable(x) y_var = fluid.layers.erf(x_var) y_test = y_var.numpy() - self.assertTrue(np.allclose(y_ref, y_test)) + np.testing.assert_allclose(y_ref, y_test, rtol=1e-05) def test_case(self): self._test_case(fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py 
b/python/paddle/fluid/tests/unittests/test_erfinv_op.py index 4f10f1daaf7..46b3f5f212c 100644 --- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py @@ -82,7 +82,7 @@ class TestErfinvAPI(unittest.TestCase): exe = paddle.static.Executor(place) res = exe.run(feed={'x': self.x.reshape([1, 5])}) for r in res: - self.assertEqual(np.allclose(self.res_ref, r), True) + np.testing.assert_allclose(self.res_ref, r, rtol=1e-05) for place in self.place: run(place) @@ -93,7 +93,7 @@ class TestErfinvAPI(unittest.TestCase): paddle.disable_static(place) x = paddle.to_tensor(self.x) out = paddle.erfinv(x) - self.assertEqual(np.allclose(self.res_ref, out.numpy()), True) + np.testing.assert_allclose(self.res_ref, out.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: @@ -105,7 +105,7 @@ class TestErfinvAPI(unittest.TestCase): paddle.disable_static(place) x = paddle.to_tensor(self.x) x.erfinv_() - self.assertEqual(np.allclose(self.res_ref, x.numpy()), True) + np.testing.assert_allclose(self.res_ref, x.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index 1f3394b6019..84a2ac78d29 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import paddle.fluid.core as core from paddle.fluid.executor import Executor from paddle.fluid.layers import mul, data, zeros, array_write, increment @@ -40,8 +40,8 @@ class TestExecutor(unittest.TestCase): out = mul(x=a, y=b) array_write(x=out, i=i, array=array) - a_np = numpy.random.random((100, 784)).astype('float32') - b_np = numpy.random.random((784, 100)).astype('float32') + a_np = np.random.random((100, 784)).astype('float32') + b_np = np.random.random((784, 100)).astype('float32') exe = Executor() res, res_array = exe.run(feed={ @@ -51,10 +51,10 @@ class TestExecutor(unittest.TestCase): fetch_list=[out, array]) self.assertEqual((100, 100), res.shape) - self.assertTrue(numpy.allclose(res, numpy.dot(a_np, b_np))) - self.assertTrue(numpy.allclose(res_array[0], a_np)) - self.assertTrue(numpy.allclose(res_array[1], b_np)) - self.assertTrue(numpy.allclose(res_array[2], res)) + np.testing.assert_allclose(res, np.dot(a_np, b_np), rtol=1e-05) + np.testing.assert_allclose(res_array[0], a_np, rtol=1e-05) + np.testing.assert_allclose(res_array[1], b_np, rtol=1e-05) + np.testing.assert_allclose(res_array[2], res, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py index ad7a319f9c2..2c8140d43fb 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import paddle.fluid.core as core import paddle.fluid as fluid from test_eager_deletion_padding_rnn import RNNConfig, PaddingRNNTestBase @@ -36,9 +36,9 @@ class TestExecutor(unittest.TestCase): output = fluid.layers.mul(x=a, y=b) # Compute with numpy - a_np = numpy.random.random((100, 784)).astype('float32') - b_np = numpy.random.random((784, 
100)).astype('float32') - out_np = numpy.dot(a_np, b_np) + a_np = np.random.random((100, 784)).astype('float32') + b_np = np.random.random((784, 100)).astype('float32') + out_np = np.dot(a_np, b_np) place = core.CPUPlace() exe = fluid.Executor(place) @@ -60,7 +60,7 @@ class TestExecutor(unittest.TestCase): run_time += end - begin out = outs[0] self.assertEqual((100, 100), out.shape) - self.assertTrue(numpy.allclose(out, out_np)) + np.testing.assert_allclose(out, out_np, rtol=1e-05) return run_time max_iters = 3 @@ -105,16 +105,15 @@ class ExecutorPaddingRNNTest(PaddingRNNTestBase): parallel=True, use_program_cache=True) - x_np = numpy.random.random( - (self.config.batch_size, self.config.num_steps, - 1)).astype("int64") - y_np = numpy.random.random( + x_np = np.random.random((self.config.batch_size, + self.config.num_steps, 1)).astype("int64") + y_np = np.random.random( (self.config.batch_size * self.config.num_steps, 1)).astype("int64") - init_hidden_np = numpy.random.random( + init_hidden_np = np.random.random( (self.config.num_layers, self.config.batch_size, self.config.hidden_size)).astype("float32") - init_cell_np = numpy.random.random( + init_cell_np = np.random.random( (self.config.num_layers, self.config.batch_size, self.config.hidden_size)).astype("float32") @@ -143,9 +142,9 @@ class ExecutorPaddingRNNTest(PaddingRNNTestBase): for i in range(len(results_with_cache)): self.assertEqual(results_with_cache[i].shape, results_without_cache[i].shape) - self.assertTrue( - numpy.allclose(results_with_cache[i], - results_without_cache[i])) + np.testing.assert_allclose(results_with_cache[i], + results_without_cache[i], + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_exponential_op.py b/python/paddle/fluid/tests/unittests/test_exponential_op.py index 2438b754a12..3b2bb42d57c 100644 --- a/python/paddle/fluid/tests/unittests/test_exponential_op.py +++ b/python/paddle/fluid/tests/unittests/test_exponential_op.py @@ -49,8 +49,7 @@ class TestExponentialOp1(OpTest): hist2 = hist2.astype("float32") hist2 = hist2 / float(data_np.size) - self.assertTrue(np.allclose(hist1, hist2, rtol=0.02), - "actual: {}, expected: {}".format(hist1, hist2)) + np.testing.assert_allclose(hist1, hist2, rtol=0.02) def test_check_grad_normal(self): self.check_grad( @@ -119,28 +118,36 @@ class TestExponentialAPI(unittest.TestCase): 1.9735607, 0.30490234, 0.57100505, 0.8115938 ] - self.assertTrue(np.allclose(x_np[0, 0, 0, 0:10], expect)) + np.testing.assert_allclose(x_np[0, 0, 0, 0:10], expect, rtol=1e-05) expect = [ 1.4296371e+00, 9.5411777e-01, 5.2575850e-01, 2.4805880e-01, 1.2322118e-04, 8.4604341e-01, 2.1111444e-01, 1.4143821e+00, 2.8194717e-01, 1.1360573e+00 ] - self.assertTrue(np.allclose(x_np[16, 1, 300, 200:210], expect)) + np.testing.assert_allclose(x_np[16, 1, 300, 200:210], + expect, + rtol=1e-05) expect = [ 1.3448033, 0.35146526, 1.7380928, 0.32012638, 0.10396296, 0.51344526, 0.15308502, 0.18712929, 0.03888268, 0.20771872 ] - self.assertTrue(np.allclose(x_np[32, 1, 600, 500:510], expect)) + np.testing.assert_allclose(x_np[32, 1, 600, 500:510], + expect, + rtol=1e-05) expect = [ 0.5107464, 0.20970327, 2.1986802, 1.580056, 0.31036147, 0.43966478, 0.9056133, 0.30119267, 1.4797124, 1.4319834 ] - self.assertTrue(np.allclose(x_np[48, 2, 900, 800:810], expect)) + np.testing.assert_allclose(x_np[48, 2, 900, 800:810], + expect, + rtol=1e-05) expect = [ 3.4640615, 1.1019983, 0.41195083, 0.22681557, 0.291846, 0.53617656, 1.5791925, 2.4645927, 0.04094889, 0.9057725 ] - 
self.assertTrue(np.allclose(x_np[63, 2, 1023, 1000:1010], expect)) + np.testing.assert_allclose(x_np[63, 2, 1023, 1000:1010], + expect, + rtol=1e-05) x = paddle.empty([10, 10], dtype="float32") x.exponential_(3.0) @@ -149,7 +156,7 @@ class TestExponentialAPI(unittest.TestCase): 0.02831675, 0.1691551, 0.6798956, 0.69347525, 0.0243443, 0.22180498, 0.30574575, 0.9839696, 0.2834912, 0.59420055 ] - self.assertTrue(np.allclose(x_np[5, 0:10], expect)) + np.testing.assert_allclose(x_np[5, 0:10], expect, rtol=1e-05) x = paddle.empty([16, 2, 1024, 768], dtype="float64") x.exponential_(0.25) @@ -158,27 +165,31 @@ class TestExponentialAPI(unittest.TestCase): 10.0541229, 12.67860643, 1.09850734, 7.35289643, 2.65471225, 3.86217432, 2.97902086, 2.92744479, 2.67927152, 0.19667352 ] - self.assertTrue(np.allclose(x_np[0, 0, 0, 100:110], expect)) + np.testing.assert_allclose(x_np[0, 0, 0, 100:110], expect, rtol=1e-05) expect = [ 0.68328125, 3.1454553, 0.92158376, 1.95842188, 1.05296941, 12.93242051, 5.20255978, 3.3588624, 1.57377174, 5.73194183 ] - self.assertTrue(np.allclose(x_np[4, 0, 300, 190:200], expect)) + np.testing.assert_allclose(x_np[4, 0, 300, 190:200], expect, rtol=1e-05) expect = [ 1.37973974, 3.45036798, 7.94625406, 1.62610973, 0.31032122, 4.13596493, 1.98494535, 1.13207041, 8.30592769, 2.81460147 ] - self.assertTrue(np.allclose(x_np[8, 1, 600, 300:310], expect)) + np.testing.assert_allclose(x_np[8, 1, 600, 300:310], expect, rtol=1e-05) expect = [ 2.27710811, 12.25003028, 2.96409124, 4.72405788, 0.67917249, 4.35856718, 0.46870976, 2.31120149, 9.61595826, 4.64446271 ] - self.assertTrue(np.allclose(x_np[12, 1, 900, 500:510], expect)) + np.testing.assert_allclose(x_np[12, 1, 900, 500:510], + expect, + rtol=1e-05) expect = [ 0.95883744, 1.57316361, 15.22524512, 20.49559882, 13.70008548, 3.29430143, 3.90390424, 0.9146657, 0.80972249, 0.33376219 ] - self.assertTrue(np.allclose(x_np[15, 1, 1023, 750:760], expect)) + np.testing.assert_allclose(x_np[15, 1, 1023, 750:760], + expect, + rtol=1e-05) x = paddle.empty([512, 768], dtype="float64") x.exponential_(0.3) @@ -187,17 +198,17 @@ class TestExponentialAPI(unittest.TestCase): 8.79266704, 4.79596009, 2.75480243, 6.04670011, 0.35379556, 0.76864868, 3.17428251, 0.26556859, 12.22485885, 10.51690383 ] - self.assertTrue(np.allclose(x_np[0, 200:210], expect)) + np.testing.assert_allclose(x_np[0, 200:210], expect, rtol=1e-05) expect = [ 5.6341126, 0.52243418, 5.36410796, 6.83672002, 11.9243311, 5.85985566, 5.75169548, 0.13877972, 6.1348385, 3.82436519 ] - self.assertTrue(np.allclose(x_np[300, 400:410], expect)) + np.testing.assert_allclose(x_np[300, 400:410], expect, rtol=1e-05) expect = [ 4.94883581, 0.56345306, 0.85841585, 1.92287801, 6.10036656, 1.19524847, 3.64735434, 5.19618716, 2.57467974, 3.49152791 ] - self.assertTrue(np.allclose(x_np[500, 700:710], expect)) + np.testing.assert_allclose(x_np[500, 700:710], expect, rtol=1e-05) x = paddle.empty([10, 10], dtype="float64") x.exponential_(4.0) @@ -206,7 +217,7 @@ class TestExponentialAPI(unittest.TestCase): 0.15713826, 0.56395964, 0.0680941, 0.00316643, 0.27046853, 0.19852724, 0.12776634, 0.09642974, 0.51977551, 1.33739699 ] - self.assertTrue(np.allclose(x_np[5, 0:10], expect)) + np.testing.assert_allclose(x_np[5, 0:10], expect, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py index b707640923a..d14a967b879 100755 --- 
a/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py +++ b/python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py @@ -214,9 +214,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) # case 2: only one text and one text_pair (batch_size = 1) input_ids, token_type_ids = self.faster_tokenizer( @@ -238,9 +240,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) # case 3: only texts (batch_size = 3) input_ids, token_type_ids = self.faster_tokenizer( @@ -261,9 +265,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs] py_input_ids = np.array(py_input_ids).reshape([3, -1]) py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) # case 4: texts and text pairs (batch_size = 3) input_ids, token_type_ids = self.faster_tokenizer( @@ -286,9 +292,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_token_type_ids = [i["token_type_ids"] for i in encoded_inputs] py_input_ids = np.array(py_input_ids).reshape([3, -1]) py_token_type_ids = np.array(py_token_type_ids).reshape([3, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) def test_padding(self): with _test_eager_guard(): @@ -319,9 +327,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) # case 2: only one text and one text_pair (batch_size = 1) input_ids, token_type_ids = self.faster_tokenizer( @@ -343,9 +353,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids 
= np.array( encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) def test_no_padding(self): with _test_eager_guard(): @@ -367,9 +379,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array(encoded_inputs["token_type_ids"]).reshape( [1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) def test_is_split_into_words(self): with _test_eager_guard(): @@ -399,9 +413,11 @@ class TestBertTokenizerOp(unittest.TestCase): py_input_ids = np.array(encoded_inputs[0]["input_ids"]).reshape([1, -1]) py_token_type_ids = np.array( encoded_inputs[0]["token_type_ids"]).reshape([1, -1]) - self.assertTrue(np.allclose(input_ids, py_input_ids, rtol=0, atol=0.01)) - self.assertTrue( - np.allclose(token_type_ids, py_token_type_ids, rtol=0, atol=0.01)) + np.testing.assert_allclose(input_ids, py_input_ids, rtol=0, atol=0.01) + np.testing.assert_allclose(token_type_ids, + py_token_type_ids, + rtol=0, + atol=0.01) def test_feed_string_var(self): self.init_data() diff --git a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py index ee168cc36c1..649384cc767 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py @@ -84,7 +84,7 @@ class TestFetchLoDTensorArray(unittest.TestCase): self.assertEqual(np.array(loss_v).shape, (device_num, )) self.assertEqual(np.array(array_v[0]).shape, (batch_size, 784)) self.assertEqual(np.array(array_v[1]).shape, (batch_size, 1)) - self.assertTrue(np.allclose(loss_v, array_v[2])) + np.testing.assert_allclose(loss_v, array_v[2], rtol=1e-05) def test_fetch_lod_tensor_array(self): if fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index 1641adbb30c..3b7a6167036 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -33,10 +33,7 @@ class TestFetchVar(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) fetched_x = fluid.executor._fetch_var("x") - np.testing.assert_array_equal(fetched_x, - self.val, - err_msg='fetch_x=%s val=%s' % - (fetched_x, self.val)) + np.testing.assert_array_equal(fetched_x, self.val) self.assertEqual(fetched_x.dtype, self.val.dtype) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py index 159f3328470..9bde09ab724 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_exe_dist_model_run.py @@ -86,7 +86,9 @@ class TestDistModelRun(unittest.TestCase): print("load inference model api 
rst:", load_inference_model_rst) # step 5: compare two results - self.assertTrue(np.allclose(dist_model_rst, load_inference_model_rst)) + np.testing.assert_allclose(dist_model_rst, + load_inference_model_rst, + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor.py b/python/paddle/fluid/tests/unittests/test_fleet_executor.py index b824df45e3e..f8469c8ed26 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor.py @@ -84,8 +84,8 @@ class TestFleetExecutor(unittest.TestCase): z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) - self.assertTrue(np.allclose(res[0], z_data)) - self.assertTrue(np.allclose(res[1], a_data)) + np.testing.assert_allclose(res[0], z_data, rtol=1e-05) + np.testing.assert_allclose(res[1], a_data, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py index 295530d9c9d..9a71937ddb2 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py @@ -84,8 +84,8 @@ class TestFleetExecutor(unittest.TestCase): z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) - self.assertTrue(np.allclose(res[0], z_data)) - self.assertTrue(np.allclose(res[1], a_data)) + np.testing.assert_allclose(res[0], z_data, rtol=1e-05) + np.testing.assert_allclose(res[1], a_data, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py index f531b85c3dd..52379a10f5b 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py @@ -79,8 +79,8 @@ class TestFleetExecutor(unittest.TestCase): z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) - self.assertTrue(np.allclose(res[0], z_data)) - self.assertTrue(np.allclose(res[1], a_data)) + np.testing.assert_allclose(res[0], z_data, rtol=1e-05) + np.testing.assert_allclose(res[1], a_data, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fmax_op.py b/python/paddle/fluid/tests/unittests/test_fmax_op.py index 359b98c4b49..593f3dd2571 100644 --- a/python/paddle/fluid/tests/unittests/test_fmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_fmax_op.py @@ -57,7 +57,7 @@ class ApiFMaxTest(unittest.TestCase): "y": self.input_y }, fetch_list=[result_fmax]) - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -70,7 +70,7 @@ class ApiFMaxTest(unittest.TestCase): "z": self.input_z }, fetch_list=[result_fmax]) - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -83,7 +83,7 @@ class ApiFMaxTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_fmax]) - 
self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -96,7 +96,7 @@ class ApiFMaxTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_fmax]) - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): """test_dynamic_api""" @@ -111,20 +111,20 @@ class ApiFMaxTest(unittest.TestCase): res = paddle.fmax(x, y) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) # test broadcast res = paddle.fmax(x, z) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.fmax(a, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.fmax(b, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) class TestElementwiseFmaxOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fmin_op.py b/python/paddle/fluid/tests/unittests/test_fmin_op.py index 88542ba9365..888ff2c8af3 100644 --- a/python/paddle/fluid/tests/unittests/test_fmin_op.py +++ b/python/paddle/fluid/tests/unittests/test_fmin_op.py @@ -59,7 +59,7 @@ class ApiFMinTest(unittest.TestCase): "y": self.input_y }, fetch_list=[result_fmin]) - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -72,7 +72,7 @@ class ApiFMinTest(unittest.TestCase): "z": self.input_z }, fetch_list=[result_fmin]) - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -85,7 +85,7 @@ class ApiFMinTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_fmin]) - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -98,7 +98,7 @@ class ApiFMinTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_fmin]) - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): """test_dynamic_api""" @@ -113,20 +113,20 @@ class ApiFMinTest(unittest.TestCase): res = paddle.fmin(x, y) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) # test broadcast res = paddle.fmin(x, z) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.fmin(a, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.fmin(b, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) class TestElementwiseFminOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_fold_op.py 
b/python/paddle/fluid/tests/unittests/test_fold_op.py index 8ae0442a1e7..0dec09ddf62 100644 --- a/python/paddle/fluid/tests/unittests/test_fold_op.py +++ b/python/paddle/fluid/tests/unittests/test_fold_op.py @@ -120,7 +120,9 @@ class TestFoldAPI(TestFoldOp): m = paddle.nn.Fold(**self.attrs) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), self.outputs['Y'])) + np.testing.assert_allclose(result.numpy(), + self.outputs['Y'], + rtol=1e-05) def test_info(self): str(paddle.nn.Fold(**self.attrs)) diff --git a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py index 8d287327603..f979e896fbd 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py @@ -286,8 +286,10 @@ class APITestStaticFusedFFN(unittest.TestCase): }, fetch_list=[res]) real_res.append(fetch) - self.assertTrue(np.allclose(real_res[0], real_res[1], atol=1e-3), - "two value is check diff") + np.testing.assert_allclose(real_res[0], + real_res[1], + rtol=1e-05, + atol=0.001) class TestFusedFFNOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py index 6aa995efa8d..1d70eb2d07b 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py @@ -555,9 +555,9 @@ class TestEagerFusedGemmEpilogue(unittest.TestCase): out_np2 = get_output(x_np, y_np, bias_np, 'relu') out_np3 = get_output(x_np, y_np, bias_np, 'gelu') - self.assertTrue(np.allclose(out1, out_np1)) - self.assertTrue(np.allclose(out2, out_np2)) - self.assertTrue(np.allclose(out3, out_np3)) + np.testing.assert_allclose(out1, out_np1, rtol=1e-05) + np.testing.assert_allclose(out2, out_np2, rtol=1e-05) + np.testing.assert_allclose(out3, out_np3, rtol=1e-05) out_grad_np1 = np.random.randint(low=-20, high=20, size=out_np1.shape).astype(np.float64) @@ -566,9 +566,9 @@ class TestEagerFusedGemmEpilogue(unittest.TestCase): x_grad_np, y_grad_np, bias_grad_np = matmul_grad( x_np, y_np, bias_np, out_grad_np1, False, False) - self.assertTrue(np.allclose(x.grad.numpy(), x_grad_np)) + np.testing.assert_allclose(x.grad.numpy(), x_grad_np, rtol=1e-05) self.assertEqual(y_grad_np.shape, y_np.shape) - self.assertTrue(np.allclose(y.grad.numpy(), y_grad_np)) + np.testing.assert_allclose(y.grad.numpy(), y_grad_np, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py b/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py index 882258239d0..50632b3f76f 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py +++ b/python/paddle/fluid/tests/unittests/test_fused_transformer_encoder_layer.py @@ -162,11 +162,10 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase): base_out.numpy(), rtol=self.rtol, atol=self.atol) - self.assertTrue( - np.allclose(fused_out.grad.numpy(), - base_out.grad.numpy(), - rtol=self.rtol, - atol=self.atol)) + np.testing.assert_allclose(fused_out.grad.numpy(), + base_out.grad.numpy(), + rtol=self.rtol, + atol=self.atol) class TestFusedTransformerEncoderLayerAct(TestFusedTransformerEncoderLayer): diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 6a0fdc4ff61..c4d19e6e904 100644 
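Every hunk in this patch applies the same mechanical rewrite, so a standalone sketch of the two assertion styles may help; the arrays below are illustrative only and are not taken from any test in this patch.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-06  # well inside the default np.allclose tolerance

# Old pattern: the test framework sees only a bare boolean, so a failure
# is reported as nothing more informative than "False is not true".
assert np.allclose(a, b)

# New pattern: on mismatch numpy raises AssertionError and prints the
# mismatched elements plus the max absolute and relative error.
np.testing.assert_allclose(a, b, rtol=1e-05)
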
--- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -251,7 +251,7 @@ class API_TestGather(unittest.TestCase): }, fetch_list=[out]) expected_output = np.array([[3, 4], [5, 6]]) - self.assertTrue(np.allclose(result, expected_output)) + np.testing.assert_allclose(result, expected_output, rtol=1e-05) def test_out2(self): with paddle.static.program_guard(paddle.static.Program(), @@ -272,7 +272,7 @@ class API_TestGather(unittest.TestCase): }, fetch_list=[out]) expected_output = gather_numpy(x_np, index_np, axis_np[0]) - self.assertTrue(np.allclose(result, expected_output)) + np.testing.assert_allclose(result, expected_output, rtol=1e-05) class API_TestDygraphGather(unittest.TestCase): @@ -286,7 +286,7 @@ class API_TestDygraphGather(unittest.TestCase): output = paddle.fluid.layers.gather(input, index) output_np = output.numpy() expected_output = np.array([[3, 4], [5, 6]]) - self.assertTrue(np.allclose(output_np, expected_output)) + np.testing.assert_allclose(output_np, expected_output, rtol=1e-05) paddle.enable_static() def test_out12(self): @@ -298,7 +298,7 @@ class API_TestDygraphGather(unittest.TestCase): output = paddle.gather(x, index, axis=0) output_np = output.numpy() expected_output = gather_numpy(input_1, index_1, axis=0) - self.assertTrue(np.allclose(output_np, expected_output)) + np.testing.assert_allclose(output_np, expected_output, rtol=1e-05) paddle.enable_static() def test_zero_index(self): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 43eaa7bf6a1..0f43068ed8b 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -66,8 +66,7 @@ class TestGaussianRandomOp(OpTest): hist2, _ = np.histogram(data, range=(-3, 5)) hist2 = hist2.astype("float32") hist2 /= float(outs[0].size) - self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01), - "hist: " + str(hist) + " hist2: " + str(hist2)) + np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) @unittest.skipIf(not core.is_compiled_with_cuda(), @@ -114,8 +113,7 @@ class TestGaussianRandomBF16Op(OpTest): hist2, _ = np.histogram(data, range=(-3, 5)) hist2 = hist2.astype("float32") hist2 /= float(outs[0].size) - self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.05), - "hist: " + str(hist) + " hist2: " + str(hist2)) + np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.05) class TestMeanStdAreInt(TestGaussianRandomOp): @@ -363,7 +361,9 @@ class TestRandomValue(unittest.TestCase): def _check_random_value(dtype, expect, expect_mean, expect_std): x = paddle.randn([32, 3, 1024, 1024], dtype=dtype) actual = x.numpy() - self.assertTrue(np.allclose(actual[2, 1, 512, 1000:1010], expect)) + np.testing.assert_allclose(actual[2, 1, 512, 1000:1010], + expect, + rtol=1e-05) self.assertTrue(np.mean(actual), expect_mean) self.assertTrue(np.std(actual), expect_std) diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index b3ada9cdaa6..82718094da7 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -58,8 +58,9 @@ class TestGcdAPI(unittest.TestCase): x = paddle.to_tensor(self.x_np) y = paddle.to_tensor(self.y_np) result = paddle.gcd(x, y) - self.assertEqual( - np.allclose(np.gcd(self.x_np, self.y_np), result.numpy()), True) + 
np.testing.assert_allclose(np.gcd(self.x_np, self.y_np), + result.numpy(), + rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_gelu_op.py b/python/paddle/fluid/tests/unittests/test_gelu_op.py index f6fa4e2da59..575af7eeabf 100644 --- a/python/paddle/fluid/tests/unittests/test_gelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_gelu_op.py @@ -44,7 +44,7 @@ class TestGeluOp(unittest.TestCase): x_var = dg.to_variable(x) y_var = fluid.layers.gelu(x_var, approximate) y_test = y_var.numpy() - self.assertTrue(np.allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)) + np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08) def _test_case1_gpu(self, approximate): x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32) @@ -55,7 +55,7 @@ class TestGeluOp(unittest.TestCase): x_var = dg.to_variable(x) y_var = fluid.layers.gelu(x_var, approximate) y_test = y_var.numpy() - self.assertTrue(np.allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)) + np.testing.assert_allclose(y_ref, y_test, rtol=1e-05, atol=1e-08) def test_cases(self): for approximate in [True, False]: @@ -87,10 +87,12 @@ class TestGeluOp(unittest.TestCase): use_fast_math(False) y_ref, x_g_ref = run_gelu_op(True) - self.assertTrue(np.allclose(y_ref, y_fast_math, rtol=1e-5, atol=5e-4)) + np.testing.assert_allclose(y_ref, y_fast_math, rtol=1e-05, atol=0.0005) - self.assertTrue( - np.allclose(x_g_ref, x_g_fast_math, rtol=1e-5, atol=5e-4)) + np.testing.assert_allclose(x_g_ref, + x_g_fast_math, + rtol=1e-05, + atol=0.0005) def test_fast_math_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py b/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py index 506176d146c..738a2196306 100644 --- a/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_generate_proposals_v2_op.py @@ -297,9 +297,9 @@ class TestGenerateProposalsV2Op(OpTest): # pre_nms_top_n=10, # post_nms_top_n=5, # return_rois_num=True) -# self.assertTrue(np.allclose(self.roi_expected, rois.numpy())) -# self.assertTrue(np.allclose(self.roi_probs_expected, roi_probs.numpy())) -# self.assertTrue(np.allclose(self.rois_num_expected, rois_num.numpy())) +# np.testing.assert_allclose(self.roi_expected, rois.numpy(), rtol=1e-5) +# np.testing.assert_allclose(self.roi_probs_expected, roi_probs.numpy(), rtol=1e-5) +# np.testing.assert_allclose(self.rois_num_expected, rois_num.numpy(), rtol=1e-5) # def test_static(self): # paddle.enable_static() @@ -340,10 +340,9 @@ class TestGenerateProposalsV2Op(OpTest): # fetch_list=[rois.name, roi_probs.name, rois_num.name], # return_numpy=False) -# self.assertTrue(np.allclose(self.roi_expected, np.array(rois))) -# self.assertTrue( -# np.allclose(self.roi_probs_expected, np.array(roi_probs))) -# self.assertTrue(np.allclose(self.rois_num_expected, np.array(rois_num))) +# np.testing.assert_allclose(self.roi_expected, np.array(rois), rtol=1e-5) +# np.testing.assert_allclose(self.roi_probs_expected, np.array(roi_probs), rtol=1e-5) +# np.testing.assert_allclose(self.rois_num_expected, np.array(rois_num), rtol=1e-5) if __name__ == '__main__': paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py b/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py index 15009ea8c58..1d0a090c9c8 100644 --- a/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py +++ 
b/python/paddle/fluid/tests/unittests/test_grad_clip_minimize.py @@ -1,257 +1,257 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import contextlib -import unittest -import numpy as np -import six - -import paddle -import paddle.fluid as fluid -from paddle.fluid import core - -from paddle.fluid.dygraph.base import to_variable - -from paddle.fluid.clip import GradientClipByValue, GradientClipByNorm, GradientClipByGlobalNorm - - -class TestGradClipByGlobalNorm(unittest.TestCase): - - def init_value(self): - self.max_global_norm = 5.0 - self.init_scale = 1.0 - - self.shape = (20, 20) - - def generate_p_g(self): - - self.para_and_grad = [] - for i in range(10): - self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) - - def get_numpy_global_norm_result(self): - gloabl_norm = 0.0 - for p, g in self.para_and_grad: - gloabl_norm += np.sum(np.square(g)) - - gloabl_norm_np = np.sqrt(gloabl_norm) - - new_np_p_g = [] - scale = 1.0 - if gloabl_norm_np > self.max_global_norm: - scale = self.max_global_norm / gloabl_norm_np - - for p, g in self.para_and_grad: - new_np_p_g.append((p, g * scale)) - - return new_np_p_g - - def get_dygrap_global_norm_result(self): - with fluid.dygraph.guard(): - - gloabl_norm_clip = GradientClipByGlobalNorm(self.max_global_norm) - p_g_var = [] - for p, g in self.para_and_grad: - new_p = to_variable(p) - new_g = to_variable(g) - p_g_var.append((new_p, new_g)) - - new_p_g_var = gloabl_norm_clip(p_g_var) - - p_g_dy_out = [] - for p, g in new_p_g_var: - p_g_dy_out.append((p.numpy(), g.numpy())) - - return p_g_dy_out - - def test_clip_by_global_norm(self): - self.init_value() - self.generate_p_g() - np_p_g = self.get_numpy_global_norm_result() - dy_out_p_g = self.get_dygrap_global_norm_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - def test_clip_by_global_norm_2(self): - self.init_value() - - self.init_scale = 0.2 - self.max_global_norm = 10 - self.generate_p_g() - np_p_g = self.get_numpy_global_norm_result() - dy_out_p_g = self.get_dygrap_global_norm_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - -class TestGradClipByNorm(unittest.TestCase): - - def init_value(self): - self.max_norm = 5.0 - self.init_scale = 1.0 - - self.shape = (10, 10) - - def generate_p_g(self): - - self.para_and_grad = [] - for i in range(10): - self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) - - def get_numpy_norm_result(self): - - new_p_g = [] - for p, g in self.para_and_grad: - norm = 
np.sqrt(np.sum(np.square(g))) - - if norm > self.max_norm: - new_p_g.append((p, g * self.max_norm / norm)) - else: - new_p_g.append((p, g)) - - return new_p_g - - def get_dygrap_norm_result(self): - with fluid.dygraph.guard(): - - norm_clip = GradientClipByNorm(self.max_norm) - p_g_var = [] - for p, g in self.para_and_grad: - new_p = to_variable(p) - new_g = to_variable(g) - p_g_var.append((new_p, new_g)) - - new_p_g_var = norm_clip(p_g_var) - - p_g_dy_out = [] - for p, g in new_p_g_var: - p_g_dy_out.append((p.numpy(), g.numpy())) - - return p_g_dy_out - - def test_clip_by_norm(self): - self.init_value() - self.generate_p_g() - np_p_g = self.get_numpy_norm_result() - dy_out_p_g = self.get_dygrap_norm_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - def test_clip_by_norm_2(self): - self.init_value() - - self.init_scale = 0.2 - self.max_norm = 10.0 - self.generate_p_g() - np_p_g = self.get_numpy_norm_result() - dy_out_p_g = self.get_dygrap_norm_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - -class TestGradClipByValue(unittest.TestCase): - - def init_value(self): - self.max_value = 0.8 - self.min_value = -0.1 - self.init_scale = 1.0 - - self.shape = (10, 10) - - def generate_p_g(self): - - self.para_and_grad = [] - for i in range(10): - self.para_and_grad.append( - (np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'), - np.random.uniform(-self.init_scale, self.init_scale, - self.shape).astype('float32'))) - - def get_numpy_clip_result(self): - - new_p_g = [] - for p, g in self.para_and_grad: - new_p_g.append((p, np.clip(g, self.min_value, self.max_value))) - - return new_p_g - - def get_dygrap_clip_result(self): - with fluid.dygraph.guard(): - value_clip = GradientClipByValue(max=self.max_value, - min=self.min_value) - p_g_var = [] - for p, g in self.para_and_grad: - new_p = to_variable(p) - new_g = to_variable(g) - p_g_var.append((new_p, new_g)) - - new_p_g_var = value_clip(p_g_var) - - p_g_dy_out = [] - for p, g in new_p_g_var: - p_g_dy_out.append((p.numpy(), g.numpy())) - - return p_g_dy_out - - def test_clip_by_value(self): - self.init_value() - self.generate_p_g() - np_p_g = self.get_numpy_clip_result() - dy_out_p_g = self.get_dygrap_clip_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - def test_clip_by_value_2(self): - self.init_value() - - self.init_scale = 0.2 - self.generate_p_g() - np_p_g = self.get_numpy_clip_result() - dy_out_p_g = self.get_dygrap_clip_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - def test_clip_by_value_3(self): - self.init_value() - - self.init_scale = 0.5 - self.max_value = 0.6 - self.min_value = None - self.generate_p_g() - np_p_g = self.get_numpy_clip_result() - dy_out_p_g = self.get_dygrap_clip_result() - - for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): - self.assertTrue(np.allclose(g_np, g_dy, rtol=1e-6, atol=1e-8)) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import contextlib +import unittest +import numpy as np +import six + +import paddle +import paddle.fluid as fluid +from paddle.fluid import core + +from paddle.fluid.dygraph.base import to_variable + +from paddle.fluid.clip import GradientClipByValue, GradientClipByNorm, GradientClipByGlobalNorm + + +class TestGradClipByGlobalNorm(unittest.TestCase): + + def init_value(self): + self.max_global_norm = 5.0 + self.init_scale = 1.0 + + self.shape = (20, 20) + + def generate_p_g(self): + + self.para_and_grad = [] + for i in range(10): + self.para_and_grad.append( + (np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'), + np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'))) + + def get_numpy_global_norm_result(self): + gloabl_norm = 0.0 + for p, g in self.para_and_grad: + gloabl_norm += np.sum(np.square(g)) + + gloabl_norm_np = np.sqrt(gloabl_norm) + + new_np_p_g = [] + scale = 1.0 + if gloabl_norm_np > self.max_global_norm: + scale = self.max_global_norm / gloabl_norm_np + + for p, g in self.para_and_grad: + new_np_p_g.append((p, g * scale)) + + return new_np_p_g + + def get_dygrap_global_norm_result(self): + with fluid.dygraph.guard(): + + gloabl_norm_clip = GradientClipByGlobalNorm(self.max_global_norm) + p_g_var = [] + for p, g in self.para_and_grad: + new_p = to_variable(p) + new_g = to_variable(g) + p_g_var.append((new_p, new_g)) + + new_p_g_var = gloabl_norm_clip(p_g_var) + + p_g_dy_out = [] + for p, g in new_p_g_var: + p_g_dy_out.append((p.numpy(), g.numpy())) + + return p_g_dy_out + + def test_clip_by_global_norm(self): + self.init_value() + self.generate_p_g() + np_p_g = self.get_numpy_global_norm_result() + dy_out_p_g = self.get_dygrap_global_norm_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + def test_clip_by_global_norm_2(self): + self.init_value() + + self.init_scale = 0.2 + self.max_global_norm = 10 + self.generate_p_g() + np_p_g = self.get_numpy_global_norm_result() + dy_out_p_g = self.get_dygrap_global_norm_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + +class TestGradClipByNorm(unittest.TestCase): + + def init_value(self): + self.max_norm = 5.0 + self.init_scale = 1.0 + + self.shape = (10, 10) + + def generate_p_g(self): + + self.para_and_grad = [] + for i in range(10): + self.para_and_grad.append( + (np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'), + np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'))) + + def get_numpy_norm_result(self): + + new_p_g = [] + for p, g in self.para_and_grad: + norm = np.sqrt(np.sum(np.square(g))) + + if norm > self.max_norm: + new_p_g.append((p, g * self.max_norm / norm)) + else: + new_p_g.append((p, g)) + + return new_p_g + + def get_dygrap_norm_result(self): + with fluid.dygraph.guard(): + + norm_clip = GradientClipByNorm(self.max_norm) + p_g_var = [] + for p, g in 
self.para_and_grad: + new_p = to_variable(p) + new_g = to_variable(g) + p_g_var.append((new_p, new_g)) + + new_p_g_var = norm_clip(p_g_var) + + p_g_dy_out = [] + for p, g in new_p_g_var: + p_g_dy_out.append((p.numpy(), g.numpy())) + + return p_g_dy_out + + def test_clip_by_norm(self): + self.init_value() + self.generate_p_g() + np_p_g = self.get_numpy_norm_result() + dy_out_p_g = self.get_dygrap_norm_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + def test_clip_by_norm_2(self): + self.init_value() + + self.init_scale = 0.2 + self.max_norm = 10.0 + self.generate_p_g() + np_p_g = self.get_numpy_norm_result() + dy_out_p_g = self.get_dygrap_norm_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + +class TestGradClipByValue(unittest.TestCase): + + def init_value(self): + self.max_value = 0.8 + self.min_value = -0.1 + self.init_scale = 1.0 + + self.shape = (10, 10) + + def generate_p_g(self): + + self.para_and_grad = [] + for i in range(10): + self.para_and_grad.append( + (np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'), + np.random.uniform(-self.init_scale, self.init_scale, + self.shape).astype('float32'))) + + def get_numpy_clip_result(self): + + new_p_g = [] + for p, g in self.para_and_grad: + new_p_g.append((p, np.clip(g, self.min_value, self.max_value))) + + return new_p_g + + def get_dygrap_clip_result(self): + with fluid.dygraph.guard(): + value_clip = GradientClipByValue(max=self.max_value, + min=self.min_value) + p_g_var = [] + for p, g in self.para_and_grad: + new_p = to_variable(p) + new_g = to_variable(g) + p_g_var.append((new_p, new_g)) + + new_p_g_var = value_clip(p_g_var) + + p_g_dy_out = [] + for p, g in new_p_g_var: + p_g_dy_out.append((p.numpy(), g.numpy())) + + return p_g_dy_out + + def test_clip_by_value(self): + self.init_value() + self.generate_p_g() + np_p_g = self.get_numpy_clip_result() + dy_out_p_g = self.get_dygrap_clip_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + def test_clip_by_value_2(self): + self.init_value() + + self.init_scale = 0.2 + self.generate_p_g() + np_p_g = self.get_numpy_clip_result() + dy_out_p_g = self.get_dygrap_clip_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + def test_clip_by_value_3(self): + self.init_value() + + self.init_scale = 0.5 + self.max_value = 0.6 + self.min_value = None + self.generate_p_g() + np_p_g = self.get_numpy_clip_result() + dy_out_p_g = self.get_dygrap_clip_result() + + for (p_np, g_np), (p_dy, g_dy) in zip(np_p_g, dy_out_p_g): + np.testing.assert_allclose(g_np, g_dy, rtol=1e-06, atol=1e-08) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index e84a3f03296..7140148119e 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -161,9 +161,13 @@ class TestGradientClipByGlobalNorm(TestGradientClip): out[i] = scale * out[i] for u, v in zip(out, out_clip): - self.assertTrue( - np.allclose(a=u, b=v, rtol=1e-5, atol=1e-8), - "gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}" + 
np.testing.assert_allclose( + u, + v, + rtol=1e-05, + atol=1e-08, + err_msg= + 'gradient clip by global norm has wrong results!, \nu={}\nv={}\ndiff={}' .format(u, v, u - v)) # test whether the output is right when use 'set_gradient_clip' @@ -271,8 +275,12 @@ class TestGradientClipByNorm(TestGradientClip): norm = np.sqrt(np.sum(np.power(u, 2))) scale = self.clip_norm / np.maximum(self.clip_norm, norm) u = u * scale - self.assertTrue(np.allclose(a=u, b=v, rtol=1e-5, atol=1e-8), - "gradient clip by norm has wrong results!") + np.testing.assert_allclose( + u, + v, + rtol=1e-05, + atol=1e-08, + err_msg='gradient clip by norm has wrong results!') # test whether the output is right when use grad_clip def test_gradient_clip(self): @@ -315,8 +323,12 @@ class TestGradientClipByValue(TestGradientClip): out[i] = np.clip(v, self.min, self.max) for u, v in zip(out, out_clip): u = np.clip(u, self.min, self.max) - self.assertTrue(np.allclose(a=u, b=v, rtol=1e-6, atol=1e-8), - "gradient clip by value has wrong results!") + np.testing.assert_allclose( + u, + v, + rtol=1e-06, + atol=1e-08, + err_msg='gradient clip by value has wrong results!') # test whether the output is right when use grad_clip def test_gradient_clip(self): @@ -458,8 +470,12 @@ class TestDygraphGradientClipByValue(TestDygraphGradientClip): for u, v in zip(grads, grads_clip): u = np.clip(u.numpy(), self.min, self.max) v = v.numpy() - self.assertTrue(np.allclose(a=u, b=v, rtol=1e-6, atol=1e-8), - "gradient clip by value has wrong results!") + np.testing.assert_allclose( + u, + v, + rtol=1e-06, + atol=1e-08, + err_msg='gradient clip by value has wrong results!') class SimpleNet(paddle.nn.Layer): diff --git a/python/paddle/fluid/tests/unittests/test_graph_reindex.py b/python/paddle/fluid/tests/unittests/test_graph_reindex.py index 1323aaeb02b..11078fae5b6 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_reindex.py +++ b/python/paddle/fluid/tests/unittests/test_graph_reindex.py @@ -51,17 +51,17 @@ class TestGraphReindex(unittest.TestCase): reindex_src, reindex_dst, out_nodes = \ paddle.incubate.graph_reindex(x, neighbors, count) - self.assertTrue(np.allclose(self.reindex_src, reindex_src)) - self.assertTrue(np.allclose(self.reindex_dst, reindex_dst)) - self.assertTrue(np.allclose(self.out_nodes, out_nodes)) + np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) + np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) reindex_src, reindex_dst, out_nodes = \ paddle.incubate.graph_reindex(x, neighbors, count, value_buffer, index_buffer, flag_buffer_hashtable=True) - self.assertTrue(np.allclose(self.reindex_src, reindex_src)) - self.assertTrue(np.allclose(self.reindex_dst, reindex_dst)) - self.assertTrue(np.allclose(self.out_nodes, out_nodes)) + np.testing.assert_allclose(self.reindex_src, reindex_src, rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, reindex_dst, rtol=1e-05) + np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) def test_heter_reindex_result(self): paddle.disable_static() @@ -73,19 +73,19 @@ class TestGraphReindex(unittest.TestCase): reindex_src, reindex_dst, out_nodes = \ paddle.incubate.graph_reindex(x, neighbors, count) - self.assertTrue( - np.allclose(self.reindex_src, - reindex_src[:self.neighbors.shape[0]])) - self.assertTrue( - np.allclose(self.reindex_src, - reindex_src[self.neighbors.shape[0]:])) - self.assertTrue( - np.allclose(self.reindex_dst, - reindex_dst[:self.neighbors.shape[0]])) - 
self.assertTrue( - np.allclose(self.reindex_dst, - reindex_dst[self.neighbors.shape[0]:])) - self.assertTrue(np.allclose(self.out_nodes, out_nodes)) + np.testing.assert_allclose(self.reindex_src, + reindex_src[:self.neighbors.shape[0]], + rtol=1e-05) + np.testing.assert_allclose(self.reindex_src, + reindex_src[self.neighbors.shape[0]:], + rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, + reindex_dst[:self.neighbors.shape[0]], + rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, + reindex_dst[self.neighbors.shape[0]:], + rtol=1e-05) + np.testing.assert_allclose(self.out_nodes, out_nodes, rtol=1e-05) def test_heter_reindex_result_v2(self): paddle.disable_static() @@ -116,9 +116,9 @@ class TestGraphReindex(unittest.TestCase): paddle.incubate.graph_reindex(paddle.to_tensor(x), paddle.to_tensor(neighbors), paddle.to_tensor(counts)) - self.assertTrue(np.allclose(reindex_src, reindex_src_)) - self.assertTrue(np.allclose(reindex_dst, reindex_dst_)) - self.assertTrue(np.allclose(out_nodes, out_nodes_)) + np.testing.assert_allclose(reindex_src, reindex_src_, rtol=1e-05) + np.testing.assert_allclose(reindex_dst, reindex_dst_, rtol=1e-05) + np.testing.assert_allclose(out_nodes, out_nodes_, rtol=1e-05) def test_reindex_result_static(self): paddle.enable_static() @@ -165,12 +165,20 @@ class TestGraphReindex(unittest.TestCase): ]) reindex_src_1, reindex_dst_1, out_nodes_1, reindex_src_2, \ reindex_dst_2, out_nodes_2 = ret - self.assertTrue(np.allclose(self.reindex_src, reindex_src_1)) - self.assertTrue(np.allclose(self.reindex_dst, reindex_dst_1)) - self.assertTrue(np.allclose(self.out_nodes, out_nodes_1)) - self.assertTrue(np.allclose(self.reindex_src, reindex_src_2)) - self.assertTrue(np.allclose(self.reindex_dst, reindex_dst_2)) - self.assertTrue(np.allclose(self.out_nodes, out_nodes_2)) + np.testing.assert_allclose(self.reindex_src, + reindex_src_1, + rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, + reindex_dst_1, + rtol=1e-05) + np.testing.assert_allclose(self.out_nodes, out_nodes_1, rtol=1e-05) + np.testing.assert_allclose(self.reindex_src, + reindex_src_2, + rtol=1e-05) + np.testing.assert_allclose(self.reindex_dst, + reindex_dst_2, + rtol=1e-05) + np.testing.assert_allclose(self.out_nodes, out_nodes_2, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py b/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py index 81fcf06167e..8fb97ef3487 100644 --- a/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py +++ b/python/paddle/fluid/tests/unittests/test_graph_send_recv_op.py @@ -263,9 +263,7 @@ class API_GraphSendRecvOpTest(unittest.TestCase): fetch_list=[res_sum, res_mean, res_max, res_min]) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_dygraph(self): paddle.disable_static() @@ -290,9 +288,7 @@ class API_GraphSendRecvOpTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_int32_input(self): paddle.disable_static() @@ -317,9 +313,7 @@ class API_GraphSendRecvOpTest(unittest.TestCase): 
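Why the rewritten calls pass rtol=1e-05 explicitly: np.allclose defaults to rtol=1e-05, atol=1e-08, while np.testing.assert_allclose defaults to the much stricter rtol=1e-07, atol=0, checking |actual - desired| <= atol + rtol * |desired|. Supplying rtol=1e-05 keeps the rewritten checks at roughly the old looseness. A minimal sketch with illustrative values:

import numpy as np

x = np.float64(1.0)
y = x + 5e-06  # relative error of 5e-06

# Passes under np.allclose defaults (rtol=1e-05, atol=1e-08).
print(np.allclose(x, y))  # True

# Would raise under assert_allclose defaults (rtol=1e-07, atol=0);
# passing rtol=1e-05 explicitly preserves the old tolerance.
np.testing.assert_allclose(x, y, rtol=1e-05)
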
ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_set_outsize_gpu(self): paddle.disable_static() @@ -335,14 +329,11 @@ class API_GraphSendRecvOpTest(unittest.TestCase): np_res = np.array([[0, 2, 3], [1, 6, 8], [0, 0, 0]], dtype="float32") np_res_set_outsize = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") - self.assertTrue( - np.allclose(np_res, res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, res)) - self.assertTrue( - np.allclose(np_res_set_outsize, res_set_outsize, atol=1e-6), - "two value is\ - {}\n{}, check diff!".format(np_res_set_outsize, - res_set_outsize)) + np.testing.assert_allclose(np_res, res, rtol=1e-05, atol=1e-06) + np.testing.assert_allclose(np_res_set_outsize, + res_set_outsize, + rtol=1e-05, + atol=1e-06) def test_out_size_tensor_static(self): paddle.enable_static() @@ -372,9 +363,7 @@ class API_GraphSendRecvOpTest(unittest.TestCase): 'out_size': data4, }, fetch_list=[res_sum]) - self.assertTrue( - np.allclose(np_sum, ret[0], atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_sum, ret[0])) + np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06) def test_api_eager_dygraph(self): with _test_eager_guard(): @@ -423,9 +412,7 @@ class API_GeometricSendURecvTest(unittest.TestCase): fetch_list=[res_sum, res_mean, res_max, res_min]) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_dygraph(self): paddle.disable_static() @@ -446,9 +433,7 @@ class API_GeometricSendURecvTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_int32_input(self): paddle.disable_static() @@ -469,9 +454,7 @@ class API_GeometricSendURecvTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_set_outsize_gpu(self): paddle.disable_static() @@ -487,14 +470,11 @@ class API_GeometricSendURecvTest(unittest.TestCase): np_res = np.array([[0, 2, 3], [1, 6, 8], [0, 0, 0]], dtype="float32") np_res_set_outsize = np.array([[0, 2, 3], [1, 6, 8]], dtype="float32") - self.assertTrue( - np.allclose(np_res, res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, res)) - self.assertTrue( - np.allclose(np_res_set_outsize, res_set_outsize, atol=1e-6), - "two value is\ - {}\n{}, check diff!".format(np_res_set_outsize, - res_set_outsize)) + np.testing.assert_allclose(np_res, res, rtol=1e-05, atol=1e-06) + np.testing.assert_allclose(np_res_set_outsize, + res_set_outsize, + rtol=1e-05, + atol=1e-06) def test_out_size_tensor_static(self): paddle.enable_static() @@ -524,9 +504,7 @@ class API_GeometricSendURecvTest(unittest.TestCase): 
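The hand-built "two value is ... check diff!" messages are dropped above because the assert_allclose failure report already embeds both arrays; where a custom message is still wanted, err_msg is the supported hook, as the gradient-clip hunks earlier in this patch use. A small sketch with placeholder values:

import numpy as np

np_res = np.array([[0.0, 2.0, 3.0], [1.0, 6.0, 8.0]])
ret_res = np_res.copy()

# err_msg is appended to numpy's own element-wise diff report, so the
# arrays no longer need to be formatted into the message by hand.
np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06,
                           err_msg='graph_send_recv output mismatch')
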
'out_size': data4, }, fetch_list=[res_sum]) - self.assertTrue( - np.allclose(np_sum, ret[0], atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_sum, ret[0])) + np.testing.assert_allclose(np_sum, ret[0], rtol=1e-05, atol=1e-06) def test_api_eager_dygraph(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py index 179b197cf62..0a810735b31 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -284,8 +284,8 @@ class TestGroupNormAPI_With_NHWC(unittest.TestCase): epsilon=1e-5, groups=2, data_layout="NCHW") - self.assertTrue(np.allclose(results[0], expect_res1[0])) - self.assertTrue(np.allclose(results[1], expect_res2[0])) + np.testing.assert_allclose(results[0], expect_res1[0], rtol=1e-05) + np.testing.assert_allclose(results[1], expect_res2[0], rtol=1e-05) class TestGroupNormException(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py index 42f97585172..26073401b68 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py @@ -123,7 +123,7 @@ class TestDygraphGroupNormv2(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute_v1(x) y2 = compute_v2(x) - self.assertTrue(np.allclose(y1, y2, atol=1e-5)) + np.testing.assert_allclose(y1, y2, rtol=1e-05, atol=1e-05) def test_eager_api(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py index 650626883c7..a411289dcbf 100644 --- a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py @@ -161,8 +161,10 @@ class TestGumbelSoftmaxOpGrad(unittest.TestCase): out_hard.sum().backward() out_soft.sum().backward() - self.assertEqual(np.allclose(x_hard.grad.numpy(), x_soft.grad.numpy()), - True) + np.testing.assert_allclose(x_hard.grad.numpy(), + x_soft.grad.numpy(), + rtol=1e-5, + atol=1e-8) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py b/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py index 9f281e6bf39..b9014beeda5 100644 --- a/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py +++ b/python/paddle/fluid/tests/unittests/test_hinge_embedding_loss.py @@ -49,7 +49,7 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): dy_result = paddle.nn.functional.hinge_embedding_loss(input, label) expected = calc_hinge_embedding_loss(self.input_np, self.label_np) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.hinge_embedding_loss(input, @@ -58,7 +58,7 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): expected = calc_hinge_embedding_loss(self.input_np, self.label_np, reduction='sum') - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.hinge_embedding_loss(input, @@ -67,7 +67,7 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): expected = 
calc_hinge_embedding_loss(self.input_np, self.label_np, reduction='none') - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, self.shape) def run_static_check(self, place=paddle.CPUPlace): @@ -91,7 +91,7 @@ class TestFunctionalHingeEmbeddingLoss(unittest.TestCase): "label": self.label_np }, fetch_list=[st_result]) - self.assertTrue(np.allclose(result_numpy, expected)) + np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def test_cpu(self): self.run_dynamic_check(place=paddle.CPUPlace()) @@ -129,7 +129,7 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss() dy_result = hinge_embedding_loss(input, label) expected = calc_hinge_embedding_loss(self.input_np, self.label_np) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( @@ -138,7 +138,7 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): expected = calc_hinge_embedding_loss(self.input_np, self.label_np, reduction='sum') - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) hinge_embedding_loss = paddle.nn.loss.HingeEmbeddingLoss( @@ -147,7 +147,7 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): expected = calc_hinge_embedding_loss(self.input_np, self.label_np, reduction='none') - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, self.shape) def run_static_check(self, place=paddle.CPUPlace): @@ -172,7 +172,7 @@ class TestClassHingeEmbeddingLoss(unittest.TestCase): "label": self.label_np }, fetch_list=[st_result]) - self.assertTrue(np.allclose(result_numpy, expected)) + np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def test_cpu(self): self.run_dynamic_check(place=paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index 5c5c15cc3c4..dcb161cfb7f 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -490,7 +490,7 @@ class TestHSigmoidLossAPI(unittest.TestCase): out2 = m(x, labels, path_table, path_code) for out in [out1, out2]: - self.assertTrue(np.allclose(self.out_np, out.numpy())) + np.testing.assert_allclose(self.out_np, out.numpy(), rtol=1e-05) paddle.enable_static() def test_static_api(self): @@ -535,7 +535,7 @@ class TestHSigmoidLossAPI(unittest.TestCase): fetch_list=[out1, out2]) for ret in [ret1, ret2]: - self.assertTrue(np.allclose(self.out_np, ret)) + np.testing.assert_allclose(self.out_np, ret, rtol=1e-05) def test_fluid_api(self): train_program = fluid.Program() @@ -562,7 +562,7 @@ class TestHSigmoidLossAPI(unittest.TestCase): feed_dict["path_table"] = self.path_table_np ret, = exe.run(train_program, feed=feed_dict, fetch_list=[out]) - self.assertTrue(np.allclose(ret, self.out_np)) + np.testing.assert_allclose(ret, self.out_np, rtol=1e-05) def test_errors(self): with paddle.static.program_guard(paddle.static.Program(), diff --git a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py index 
3912fcafd52..81e68f61c20 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py @@ -150,7 +150,7 @@ class TestIdentityLossAPI(unittest.TestCase): self.identity_loss_ref(self.x, 2) ] for out, out_ref in zip(res, ref): - self.assertEqual(np.allclose(out, out_ref, rtol=1e-04), True) + np.testing.assert_allclose(out, out_ref, rtol=0.0001) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -159,8 +159,7 @@ class TestIdentityLossAPI(unittest.TestCase): x_tensor = paddle.to_tensor(x) out = paddle.incubate.identity_loss(x_tensor, reduction) out_ref = self.identity_loss_ref(x, reduction) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-04), - True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.0001) test_case(self.x, 0) test_case(self.x, 1) diff --git a/python/paddle/fluid/tests/unittests/test_identity_op.py b/python/paddle/fluid/tests/unittests/test_identity_op.py index 17174b0d8e9..c4ab5791828 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_op.py @@ -37,7 +37,7 @@ class TestIdentityAPI(unittest.TestCase): out_ref = self.x for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out, out_ref, rtol=1e-08) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -46,7 +46,7 @@ class TestIdentityAPI(unittest.TestCase): out = id_layer(x_tensor) out_ref = self.x - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py index 79194928f9d..3491345d67e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py @@ -244,13 +244,13 @@ class TestAmpScaler(unittest.TestCase): []) # optimize_ops is [] in dygraph mode for i in range(len(outs_with_scaler[1])): # check each grad - self.assertEqual( - np.allclose(outs_with_scaler[1][i][1].numpy(), - outs_no_scaler[1][i][1].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[1][i][1].numpy(), + outs_no_scaler[1][i][1].numpy(), + rtol=1e-05) # check each parameter - self.assertEqual( - np.allclose(outs_with_scaler[1][i][0].numpy(), - outs_no_scaler[1][i][0].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[1][i][0].numpy(), + outs_no_scaler[1][i][0].numpy(), + rtol=1e-05) def test_minimize(self): self.minimize() @@ -291,9 +291,9 @@ class TestAmpScaler(unittest.TestCase): for i in range(len(outs_with_scaler)): # check each parameter - self.assertEqual( - np.allclose(outs_with_scaler[i].numpy(), - outs_no_scaler[i].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[i].numpy(), + outs_no_scaler[i].numpy(), + rtol=1e-05) def test_step(self): self.step() @@ -555,8 +555,9 @@ class TestGradScalerStateDict(unittest.TestCase): use_data_loader=True, use_save_load=False) print('save_load:', out_use_state_dict[0], out_no_state_dict[0]) - self.assertTrue( - np.allclose(out_use_state_dict[0], out_no_state_dict[0])) + np.testing.assert_allclose(out_use_state_dict[0], + out_no_state_dict[0], + rtol=1e-05) func_isinstance() @@ -872,8 +873,9 @@ class TestPureFp16SaveLoad(unittest.TestCase): use_data_loader=True, 
use_save_load=False) print('save_load:', out_use_save_load[0], out_no_save_load[0]) - self.assertTrue( - np.allclose(out_use_save_load[0], out_no_save_load[0])) + np.testing.assert_allclose(out_use_save_load[0], + out_no_save_load[0], + rtol=1e-05) func_isinstance() @@ -1114,9 +1116,14 @@ class TestResnet2(unittest.TestCase): out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1132,9 +1139,14 @@ class TestResnet2(unittest.TestCase): use_data_loader=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1153,9 +1165,14 @@ class TestResnet2(unittest.TestCase): use_param_group=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1247,9 +1264,14 @@ class TestResnet(unittest.TestCase): out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-2)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-1)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=0.01) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.1) func_isinstance() @@ -1301,10 +1323,14 @@ class TestBf16(unittest.TestCase): out_fp32 = self.train(enable_amp=False) out_bf16_O1 = self.train(enable_amp=True, amp_level='O1') out_bf16_O2 = self.train(enable_amp=True, amp_level='O2') - self.assertTrue( - np.allclose(out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1)) - self.assertTrue( - np.allclose(out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1)) + np.testing.assert_allclose(out_fp32, + out_bf16_O1, + rtol=0.001, + atol=0.1) + np.testing.assert_allclose(out_fp32, + out_bf16_O2, + rtol=0.001, + atol=0.1) func_isinstance() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py index 6a256ec1088..015e3a8f459 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision_for_eager.py @@ -243,13 +243,13 @@ class TestAmpScaler(unittest.TestCase): []) # optimize_ops is [] in dygraph mode for i in range(len(outs_with_scaler[1])): # check each grad - self.assertEqual( - np.allclose(outs_with_scaler[1][i][1].numpy(), - 
outs_no_scaler[1][i][1].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[1][i][1].numpy(), + outs_no_scaler[1][i][1].numpy(), + rtol=1e-05) # check each parameter - self.assertEqual( - np.allclose(outs_with_scaler[1][i][0].numpy(), - outs_no_scaler[1][i][0].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[1][i][0].numpy(), + outs_no_scaler[1][i][0].numpy(), + rtol=1e-05) def test_minimize(self): self.minimize() @@ -290,9 +290,9 @@ class TestAmpScaler(unittest.TestCase): for i in range(len(outs_with_scaler)): # check each parameter - self.assertEqual( - np.allclose(outs_with_scaler[i].numpy(), - outs_no_scaler[i].numpy()), True) + np.testing.assert_allclose(outs_with_scaler[i].numpy(), + outs_no_scaler[i].numpy(), + rtol=1e-05) def test_step(self): self.step() @@ -554,8 +554,9 @@ class TestGradScalerStateDict(unittest.TestCase): use_data_loader=True, use_save_load=False) print('save_load:', out_use_state_dict[0], out_no_state_dict[0]) - self.assertTrue( - np.allclose(out_use_state_dict[0], out_no_state_dict[0])) + np.testing.assert_allclose(out_use_state_dict[0], + out_no_state_dict[0], + rtol=1e-05) func_isinstance() @@ -863,8 +864,9 @@ class TestPureFp16SaveLoad(unittest.TestCase): use_data_loader=True, use_save_load=False) print('save_load:', out_use_save_load[0], out_no_save_load[0]) - self.assertTrue( - np.allclose(out_use_save_load[0], out_no_save_load[0])) + np.testing.assert_allclose(out_use_save_load[0], + out_no_save_load[0], + rtol=1e-05) func_isinstance() @@ -1105,9 +1107,14 @@ class TestResnet2(unittest.TestCase): out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1123,9 +1130,14 @@ class TestResnet2(unittest.TestCase): use_data_loader=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1144,9 +1156,14 @@ class TestResnet2(unittest.TestCase): use_param_group=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-5)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-2)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + atol=0.01) func_isinstance() @@ -1238,9 +1255,14 @@ class TestResnet(unittest.TestCase): out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') print(out_fp32[0], out_amp[0], out_pure_fp16[0]) - self.assertTrue(np.allclose(out_fp32[0], out_amp[0], atol=1.e-2)) - self.assertTrue( - np.allclose(out_fp32[0], out_pure_fp16[0], atol=1.e-1)) + np.testing.assert_allclose(out_fp32[0], + out_amp[0], + rtol=1e-05, + atol=0.01) + np.testing.assert_allclose(out_fp32[0], + out_pure_fp16[0], + rtol=1e-05, + 
atol=0.1) func_isinstance() @@ -1290,10 +1312,14 @@ class TestBf16(unittest.TestCase): out_fp32 = self.train(enable_amp=False) out_bf16_O1 = self.train(enable_amp=True, amp_level='O1') out_bf16_O2 = self.train(enable_amp=True, amp_level='O2') - self.assertTrue( - np.allclose(out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1)) - self.assertTrue( - np.allclose(out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1)) + np.testing.assert_allclose(out_fp32, + out_bf16_O1, + rtol=0.001, + atol=0.1) + np.testing.assert_allclose(out_fp32, + out_bf16_O2, + rtol=0.001, + atol=0.1) func_isinstance() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index e1663ad400f..452d8e72672 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -343,11 +343,11 @@ class TestImperative(unittest.TestCase): fluid.set_flags({'FLAGS_sort_sum_gradient': True}) loss2.backward() - self.assertTrue(np.allclose(ret.numpy(), x * 10)) - self.assertTrue(np.allclose(inputs[0].gradient(), x)) - self.assertTrue(np.allclose(ret2.numpy(), x * 10)) + np.testing.assert_allclose(ret.numpy(), x * 10, rtol=1e-05) + np.testing.assert_allclose(inputs[0].gradient(), x, rtol=1e-05) + np.testing.assert_allclose(ret2.numpy(), x * 10, rtol=1e-05) a = inputs2[0].gradient() - self.assertTrue(np.allclose(inputs2[0].gradient(), x)) + np.testing.assert_allclose(inputs2[0].gradient(), x, rtol=1e-05) def test_sum_op(self): with _test_eager_guard(): @@ -526,10 +526,10 @@ class TestImperative(unittest.TestCase): feed={inp.name: np_inp}, fetch_list=[out.name, param_grads[1].name]) - self.assertTrue(np.allclose(dy_out, static_out)) - self.assertTrue(np.allclose(dy_grad, static_grad)) - self.assertTrue(np.allclose(dy_out2, static_out)) - self.assertTrue(np.allclose(dy_grad2, static_grad)) + np.testing.assert_allclose(dy_out, static_out, rtol=1e-05) + np.testing.assert_allclose(dy_grad, static_grad, rtol=1e-05) + np.testing.assert_allclose(dy_out2, static_out, rtol=1e-05) + np.testing.assert_allclose(dy_grad2, static_grad, rtol=1e-05) params = mlp.parameters(True) self.assertEqual("linear_0.w_0", params[0].name) @@ -642,18 +642,18 @@ class TestImperative(unittest.TestCase): loss.backward() np.testing.assert_array_equal(loss.grad.numpy(), [1]) - self.assertTrue( - np.allclose(mlp1._linear1.weight.grad.numpy(), - expected_weight1_grad)) - self.assertTrue( - np.allclose(mlp1._linear1.bias.grad.numpy(), - expected_bias1_grad)) - self.assertTrue( - np.allclose(mlp1._linear2.weight.grad.numpy(), - expected_weight2_grad)) - self.assertTrue( - np.allclose(mlp1._linear2.bias.grad.numpy(), - expected_bias2_grad)) + np.testing.assert_allclose(mlp1._linear1.weight.grad.numpy(), + expected_weight1_grad, + rtol=1e-05) + np.testing.assert_allclose(mlp1._linear1.bias.grad.numpy(), + expected_bias1_grad, + rtol=1e-05) + np.testing.assert_allclose(mlp1._linear2.weight.grad.numpy(), + expected_weight2_grad, + rtol=1e-05) + np.testing.assert_allclose(mlp1._linear2.bias.grad.numpy(), + expected_bias2_grad, + rtol=1e-05) mlp2.clear_gradients() np.testing.assert_array_equal(clear_loss.grad.numpy(), [1]) @@ -734,7 +734,7 @@ class TestImperative(unittest.TestCase): 'inp2': np_inp2 }, fetch_list=out)[0] - self.assertTrue(np.allclose(dygraph_result, static_result)) + np.testing.assert_allclose(dygraph_result, static_result, rtol=1e-05) def test_dygraph_vs_static(self): with _test_eager_guard(): @@ -860,7 +860,7 @@ class 
TestDygraphUtils(unittest.TestCase): a = paddle.to_tensor(a_np) res1 = func(a, act="sigmoid", use_mkldnn=True, use_cudnn=True) res2 = fluid.layers.sigmoid(a) - self.assertTrue(np.allclose(res1.numpy(), res2.numpy())) + np.testing.assert_allclose(res1.numpy(), res2.numpy(), rtol=1e-05) def test_append_activation_in_dygraph2(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index 5527ab27691..f0ef2fc782d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -327,7 +327,7 @@ class TestDygraphDoubleGrad(TestCase): dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) def test_example_no_grad_vars(self): with _test_eager_guard(): @@ -387,7 +387,9 @@ class TestDygraphDoubleGrad(TestCase): ) dx_expected = dy_expected * grad_y_np + dz_expected * grad_z_np - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), + dx_expected, + rtol=1e-05) if grad_y is not None: self.assertTrue(grad_y.stop_gradient) @@ -426,7 +428,7 @@ class TestDygraphDoubleGrad(TestCase): # Theoritical result based on math calculation dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x) loss.backward(retain_graph=True) @@ -435,7 +437,7 @@ class TestDygraphDoubleGrad(TestCase): x_grad_expected = (2.0 / float(numel) * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05) for i in range(5): loss.backward(retain_graph=True) @@ -444,7 +446,9 @@ class TestDygraphDoubleGrad(TestCase): i + 2) * (2.0 / float(numel) * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + np.testing.assert_allclose(x_grad_actual, + x_grad_expected, + rtol=1e-05) def test_example_with_gradient_accumulation_and_create_graph(self): with _test_eager_guard(): @@ -476,7 +480,7 @@ class TestDygraphDoubleGrad(TestCase): dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x) loss.backward() @@ -485,7 +489,7 @@ class TestDygraphDoubleGrad(TestCase): x_grad_expected = (2.0 / float(numel) * (x_np + dx_expected * (x_np > 0) * 4 / float(numel))).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05) def test_example_with_gradient_accumulation_and_no_grad_vars(self): with _test_eager_guard(): @@ -514,14 +518,14 @@ class TestDygraphDoubleGrad(TestCase): dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2).astype('float32') - 
self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected))
+        np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05)
         loss = fluid.layers.reduce_mean(dx_actual * dx_actual + x * x)
         loss.backward()
         x_grad_actual = x.gradient()
         x_grad_expected = (2.0 * x_np / float(numel)).astype('float32')
-        self.assertTrue(np.allclose(x_grad_actual, x_grad_expected))
+        np.testing.assert_allclose(x_grad_actual, x_grad_expected, rtol=1e-05)
     def test_example_with_gradient_accumulation_and_not_create_graph(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
index e724421d1db..3a52bcc1ead 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -231,12 +231,12 @@ class TestDygraphGAN(unittest.TestCase):
         self.assertEqual(dy_g_loss, static_g_loss)
         self.assertEqual(dy_d_loss, static_d_loss)
         for k, v in six.iteritems(dy_params):
-            self.assertTrue(np.allclose(v, static_params[k]))
+            np.testing.assert_allclose(v, static_params[k], rtol=1e-05)
         self.assertEqual(dy_g_loss2, static_g_loss)
         self.assertEqual(dy_d_loss2, static_d_loss)
         for k, v in six.iteritems(dy_params2):
-            self.assertTrue(np.allclose(v, static_params[k]))
+            np.testing.assert_allclose(v, static_params[k], rtol=1e-05)
     def test_gan_float32(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
index 28d24f4b5b7..721ce897210 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
@@ -165,9 +165,13 @@ class TestDygraphGNN(unittest.TestCase):
         model2_gc_weight_value = model2.gc.weight.numpy()
         self.assertEqual(static_loss, loss_value)
-        self.assertTrue(np.allclose(static_weight, model_gc_weight_value))
+        np.testing.assert_allclose(static_weight,
+                                   model_gc_weight_value,
+                                   rtol=1e-05)
         self.assertEqual(static_loss, loss2_value)
-        self.assertTrue(np.allclose(static_weight, model2_gc_weight_value))
+        np.testing.assert_allclose(static_weight,
+                                   model2_gc_weight_value,
+                                   rtol=1e-05)
         sys.stderr.write('%s %s\n' % (static_loss, loss_value))
     def test_gnn_float32(self):
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py
index 8015fceff5d..d669d8aad3e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py
@@ -197,8 +197,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
                 static_param_updated[static_param_name_list[
                     k - 1]] = out[k]
-        self.assertTrue(
-            np.allclose(static_loss_value, dy_loss_value, rtol=1e-3))
+        np.testing.assert_allclose(static_loss_value,
+                                   dy_loss_value,
+                                   rtol=0.001)
         for key, value in six.iteritems(static_param_init):
             np.testing.assert_array_equal(value, dy_param_init[key])
         for key, value in six.iteritems(static_param_updated):
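A note on the tolerance pinned throughout this patch: np.allclose and np.testing.assert_allclose apply the same elementwise test, abs(actual - desired) <= atol + rtol * abs(desired), but their defaults differ (np.allclose uses rtol=1e-05 and atol=1e-08, while assert_allclose uses rtol=1e-07 and atol=0). Passing rtol=1e-05 explicitly, as the hunks above do, preserves the old relative tolerance; the old implicit atol=1e-08 is dropped, which is why some conversions below add an explicit atol. A standalone sketch, not part of the patch itself:

import numpy as np

a, b = 1.0, 1.0 + 3e-6  # roughly 3e-6 relative error

assert np.allclose(a, b)  # passes under the looser np.allclose defaults
np.testing.assert_allclose(a, b, rtol=1e-05)  # passes: old rtol restored
try:
    np.testing.assert_allclose(a, b)  # defaults: rtol=1e-07, atol=0
except AssertionError:
    print('the stricter assert_allclose defaults reject this error')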
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
index a365b00e912..c4c5fee292a 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
@@ -258,15 +258,22 @@ class TestImperativeMnist(unittest.TestCase):
                 static_param_value[static_param_name_list[i - 1]] = out[i]
-        self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all()))
+        np.testing.assert_allclose(dy_x_data.all(),
+                                   static_x_data.all(),
+                                   rtol=1e-05)
         for key, value in six.iteritems(static_param_init_value):
-            self.assertTrue(np.allclose(value, dy_param_init_value[key]))
+            np.testing.assert_allclose(value,
+                                       dy_param_init_value[key],
+                                       rtol=1e-05)
-        self.assertTrue(np.allclose(static_out, dy_out))
+        np.testing.assert_allclose(static_out, dy_out, rtol=1e-05)
         for key, value in six.iteritems(static_param_value):
-            self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
+            np.testing.assert_allclose(value,
+                                       dy_param_value[key],
+                                       rtol=1e-05,
+                                       atol=1e-05)
     def test_mnist_float32(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py
index 18094024b4a..e96f1ef4230 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py
@@ -140,15 +140,22 @@ class TestImperativeMnistSortGradient(unittest.TestCase):
                 if batch_id == 20:
                     break
-        self.assertTrue(np.allclose(dy_x_data2.all(), static_x_data.all()))
+        np.testing.assert_allclose(dy_x_data2.all(),
+                                   static_x_data.all(),
+                                   rtol=1e-05)
         for key, value in six.iteritems(static_param_init_value):
-            self.assertTrue(np.allclose(value, dy_param_init_value2[key]))
+            np.testing.assert_allclose(value,
+                                       dy_param_init_value2[key],
+                                       rtol=1e-05)
-        self.assertTrue(np.allclose(static_out, dy_out2))
+        np.testing.assert_allclose(static_out, dy_out2, rtol=1e-05)
         for key, value in six.iteritems(static_param_value):
-            self.assertTrue(np.allclose(value, dy_param_value2[key], atol=1e-5))
+            np.testing.assert_allclose(value,
+                                       dy_param_value2[key],
+                                       rtol=1e-05,
+                                       atol=1e-05)
     def test_mnist_sort_gradient_float32(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
index 21327255fb6..4085728f5dd 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py
@@ -570,23 +570,28 @@ class TestDygraphOCRAttention(unittest.TestCase):
                     static_grad_value[static_grad_name_list[
                         i - grad_start_pos]] = out[i]
-        self.assertTrue(np.allclose(static_out, dy_out))
+        np.testing.assert_allclose(static_out, dy_out, rtol=1e-05, atol=1e-8)
         for key, value in six.iteritems(static_param_init_value):
             np.testing.assert_array_equal(value, dy_param_init_value[key])
         for key, value in six.iteritems(static_param_value):
-            self.assertTrue(np.allclose(value, dy_param_value[key], rtol=1e-05))
+            np.testing.assert_allclose(value,
+                                       dy_param_value[key],
+                                       rtol=1e-05,
+                                       atol=1e-8)
         # check eager here
-        self.assertTrue(np.allclose(static_out, eager_out))
+        np.testing.assert_allclose(static_out, eager_out, rtol=1e-05, atol=1e-8)
         for key, value in six.iteritems(static_param_init_value):
             np.testing.assert_array_equal(value, eager_param_init_value[key])
         for key, value in six.iteritems(static_param_value):
-            self.assertTrue(
-                np.allclose(value, eager_param_value[key], rtol=1e-05))
+            np.testing.assert_allclose(value,
+                                       eager_param_value[key],
+                                       rtol=1e-05,
+                                       atol=1e-8)
if __name__ == '__main__':
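One thing the autofix deliberately leaves alone in the MNIST hunks above: dy_x_data.all() reduces the whole array to a single boolean, so those assertions compare two scalars (in practice True vs. True) rather than the arrays elementwise; the rewrite only swaps the assertion API and keeps the original expression. A minimal sketch with made-up arrays showing the difference:

import numpy as np

dy = np.array([1.0, 2.0, 3.0])
st = np.array([1.0, 2.0, 999.0])  # plainly different from dy

# .all() collapses each array to one truth value, so this still "passes"
assert np.allclose(dy.all(), st.all())
try:
    np.testing.assert_allclose(dy, st, rtol=1e-05)  # elementwise check
except AssertionError:
    print('comparing the arrays themselves catches the mismatch')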
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
index d7b55215ae7..dfff93a905e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -196,19 +196,28 @@ class TestImperativeOptimizerBase(unittest.TestCase):
                 static_param_value[static_param_name_list[i - 1]] = out[i]
         for key, value in six.iteritems(static_param_init_value):
-            self.assertTrue(np.allclose(value, dy_param_init_value[key]))
+            np.testing.assert_allclose(value,
+                                       dy_param_init_value[key],
+                                       rtol=1e-05)
         if core.is_compiled_with_rocm():
-            self.assertTrue(np.allclose(static_out, dy_out, atol=1e-3))
+            np.testing.assert_allclose(static_out,
+                                       dy_out,
+                                       rtol=1e-05,
+                                       atol=0.001)
         else:
-            self.assertTrue(np.allclose(static_out, dy_out))
+            np.testing.assert_allclose(static_out, dy_out, rtol=1e-05)
         for key, value in six.iteritems(static_param_value):
             if core.is_compiled_with_rocm():
-                self.assertTrue(
-                    np.allclose(value, dy_param_value[key], atol=1e-3))
+                np.testing.assert_allclose(value,
+                                           dy_param_value[key],
+                                           rtol=1e-05,
+                                           atol=0.001)
             else:
-                self.assertTrue(np.allclose(value, dy_param_value[key]))
+                np.testing.assert_allclose(value,
+                                           dy_param_value[key],
+                                           rtol=1e-05)
class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase):
@@ -412,15 +421,16 @@ class TestOptimizerLearningRate(unittest.TestCase):
         adam = fluid.optimizer.Adam(0.001, parameter_list=linear.parameters())
-        self.assertTrue(
-            np.allclose(adam.current_step_lr(), 0.001, rtol=1e-06,
-                        atol=0.0))
+        np.testing.assert_allclose(adam.current_step_lr(),
+                                   0.001,
+                                   rtol=1e-06,
+                                   atol=0.0)
         for i in range(10):
             adam.minimize(loss)
             lr = adam.current_step_lr()
-            self.assertTrue(np.allclose(lr, 0.001, rtol=1e-06, atol=0.0))
+            np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0)
     def test_constant_lr(self):
         with _test_eager_guard():
@@ -446,15 +456,17 @@ class TestOptimizerLearningRate(unittest.TestCase):
             bd, value, 0),
                                     parameter_list=linear.parameters())
-        self.assertTrue(
-            np.allclose(adam.current_step_lr(), 0.2, rtol=1e-06, atol=0.0))
+        np.testing.assert_allclose(adam.current_step_lr(),
+                                   0.2,
+                                   rtol=1e-06,
+                                   atol=0.0)
         ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0]
         for i in range(12):
             adam.minimize(loss)
             lr = adam.current_step_lr()
-            self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0))
+            np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
     def test_lr_decay(self):
         with _test_eager_guard():
@@ -481,15 +493,17 @@ class TestOptimizerLearningRate(unittest.TestCase):
             staircase=True),
                                     parameter_list=linear.parameters())
-        self.assertTrue(
-            np.allclose(adam.current_step_lr(), 1.0, rtol=1e-06, atol=0.0))
+        np.testing.assert_allclose(adam.current_step_lr(),
+                                   1.0,
+                                   rtol=1e-06,
+                                   atol=0.0)
         ret = [1.0, 1.0, 1.0, np.exp(-0.5), np.exp(-0.5)]
         for i in range(5):
             adam.minimize(loss)
             lr = adam.current_step_lr()
-            self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0))
+            np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0)
     def test_lr_decay_natural_exp(self):
         with _test_eager_guard():
@@ -515,8 +529,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             adam.set_lr(lr_list[i])
             adam.minimize(loss)
             lr = adam.current_step_lr()
-            self.assertTrue(
-                np.allclose(lr, lr_list[i], rtol=1e-06, atol=0.0))
+            np.testing.assert_allclose(lr, lr_list[i], rtol=1e-06, atol=0.0)
         lr_var = fluid.layers.create_global_var(shape=[1],
                                                 value=0.7,
@@ -524,7 +537,7 @@
class TestOptimizerLearningRate(unittest.TestCase): adam.set_lr(lr_var) adam.minimize(loss) lr = adam.current_step_lr() - self.assertTrue(np.allclose(lr, 0.7, rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(lr, 0.7, rtol=1e-06, atol=0.0) with self.assertRaises(RuntimeError): adam = fluid.optimizer.Adam(fluid.dygraph.NaturalExpDecay( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 2bcf0b97bf8..c1ef5e937e0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -212,19 +212,28 @@ class TestImperativeOptimizerBase(unittest.TestCase): static_param_value[static_param_name_list[i - 1]] = out[i] for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, dy_param_init_value[key])) + np.testing.assert_allclose(value, + dy_param_init_value[key], + rtol=1e-05) if core.is_compiled_with_rocm(): - self.assertTrue(np.allclose(static_out, dy_out, atol=1e-3)) + np.testing.assert_allclose(static_out, + dy_out, + rtol=1e-05, + atol=0.001) else: - self.assertTrue(np.allclose(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) for key, value in six.iteritems(static_param_value): if core.is_compiled_with_rocm(): - self.assertTrue( - np.allclose(value, dy_param_value[key], atol=1e-3)) + np.testing.assert_allclose(value, + dy_param_value[key], + rtol=1e-05, + atol=0.001) else: - self.assertTrue(np.allclose(value, dy_param_value[key])) + np.testing.assert_allclose(value, + dy_param_value[key], + rtol=1e-05) class TestImperativeOptimizerPiecewiseDecay(TestImperativeOptimizerBase): @@ -553,14 +562,16 @@ class TestOptimizerLearningRate(unittest.TestCase): adam = paddle.optimizer.Adam(0.001, parameters=linear.parameters()) - self.assertTrue( - np.allclose(adam.get_lr(), 0.001, rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(adam.get_lr(), + 0.001, + rtol=1e-06, + atol=0.0) for i in range(10): adam.minimize(loss) lr = adam.get_lr() - self.assertTrue(np.allclose(lr, 0.001, rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0) def test_constant_lr(self): with _test_eager_guard(): @@ -586,14 +597,13 @@ class TestOptimizerLearningRate(unittest.TestCase): adam = paddle.optimizer.Adam(scheduler, parameters=linear.parameters()) - self.assertTrue( - np.allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(adam.get_lr(), 0.2, rtol=1e-06, atol=0.0) ret = [0.2, 0.2, 0.4, 0.4, 0.6, 0.6, 0.8, 0.8, 1.0, 1.0, 1.0, 1.0] for i in range(12): adam.minimize(loss) lr = adam.get_lr() - self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0) scheduler.step() def test_lr_decay(self): @@ -616,14 +626,13 @@ class TestOptimizerLearningRate(unittest.TestCase): adam = paddle.optimizer.Adam(scheduler, parameters=linear.parameters()) - self.assertTrue( - np.allclose(adam.get_lr(), 1.0, rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(adam.get_lr(), 1.0, rtol=1e-06, atol=0.0) ret = [1.0, np.exp(-0.5), np.exp(-1)] for i in range(3): adam.minimize(loss) lr = adam.get_lr() - self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0) scheduler.step() def test_lr_scheduler_natural_exp(self): @@ -650,8 +659,7 @@ class TestOptimizerLearningRate(unittest.TestCase): 
adam.set_lr(lr_list[i]) adam.minimize(loss) lr = adam.get_lr() - self.assertTrue( - np.allclose(lr, lr_list[i], rtol=1e-06, atol=0.0)) + np.testing.assert_allclose(lr, lr_list[i], rtol=1e-06, atol=0.0) with self.assertRaises(TypeError): lr_var = fluid.layers.create_global_var(shape=[1], diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index ac41f84be34..0945cc2b041 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -178,7 +178,7 @@ class TestImperativeMnist(unittest.TestCase): for i in range(1, len(out)): static_param_value[static_param_name_list[i - 1]] = out[i] - #self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all())) + # np.testing.assert_allclose(dy_x_data.all(), static_x_data.all(), rtol=1e-5) for key, value in six.iteritems(static_param_init_value): self.assertTrue(np.equal(value, dy_param_init_value[key]).all()) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 0371176d782..2f266c38458 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -414,24 +414,26 @@ class TestDygraphResnet(unittest.TestCase): print("static", static_out) print("dygraph", dy_out) - self.assertTrue(np.allclose(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, dy_param_init_value[key])) + np.testing.assert_allclose(value, + dy_param_init_value[key], + rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - self.assertTrue(np.allclose(value, dy_grad_value[key])) + np.testing.assert_allclose(value, dy_grad_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_param_value), len(static_param_value)) for key, value in six.iteritems(static_param_value): - self.assertTrue(np.allclose(value, dy_param_value[key])) + np.testing.assert_allclose(value, dy_param_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index 4942e1db769..15eda5557e5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -214,24 +214,26 @@ class TestDygraphResnetSortGradient(unittest.TestCase): static_grad_value[static_grad_name_list[ i - grad_start_pos]] = out[i] - self.assertTrue(np.allclose(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, dy_param_init_value[key])) + np.testing.assert_allclose(value, + dy_param_init_value[key], + rtol=1e-05) self.assertTrue(np.isfinite(value.all())) 
self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - self.assertTrue(np.allclose(value, dy_grad_value[key])) + np.testing.assert_allclose(value, dy_grad_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_param_value), len(static_param_value)) for key, value in six.iteritems(static_param_value): - self.assertTrue(np.allclose(value, dy_param_value[key])) + np.testing.assert_allclose(value, dy_param_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index fa2d470fc5e..aa73e0412f7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -467,61 +467,51 @@ class TestImperativeResneXt(unittest.TestCase): static_grad_value[static_grad_name_list[ i - grad_start_pos]] = out[i] - self.assertTrue( - np.allclose(static_out, dy_out), - "\nstatic_out: {}\ndy_out: {}".format(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, dy_param_init_value[key])) + np.testing.assert_allclose(value, + dy_param_init_value[key], + rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - self.assertTrue( - np.allclose(value, dy_grad_value[key]), - "\nstatic_grad_value: {}\ndy_grad_value: {}".format( - value, dy_grad_value[key])) + np.testing.assert_allclose(value, dy_grad_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) self.assertEqual(len(dy_param_value), len(static_param_value)) for key, value in six.iteritems(static_param_value): - self.assertTrue( - np.allclose(value, dy_param_value[key]), - "\nstatic_param_value: {}\ndy_param_value: {}".format( - value, dy_param_value[key])) + np.testing.assert_allclose(value, dy_param_value[key], rtol=1e-05) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) # check eager - self.assertTrue( - np.allclose(static_out, eager_out), - "\nstatic_out: {}\neager_out: {}".format(static_out, eager_out)) + np.testing.assert_allclose(static_out, eager_out, rtol=1e-05) self.assertEqual(len(eager_param_init_value), len(static_param_init_value)) for key, value in six.iteritems(static_param_init_value): - self.assertTrue(np.allclose(value, eager_param_init_value[key])) + np.testing.assert_allclose(value, + eager_param_init_value[key], + rtol=1e-05) self.assertEqual(len(eager_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - self.assertTrue( - np.allclose(value, eager_grad_value[key]), - "\nstatic_grad_value: {}\neager_grad_value: {}".format( - value, eager_grad_value[key])) + np.testing.assert_allclose(value, eager_grad_value[key], rtol=1e-05) self.assertEqual(len(eager_param_value), len(static_param_value)) for key, value in six.iteritems(static_param_value): - self.assertTrue( - np.allclose(value, eager_param_value[key]), - "\nstatic_param_value: 
{}\neagear_param_value: {}".format( - value, eager_param_value[key])) + np.testing.assert_allclose(value, + eager_param_value[key], + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py index 027bb2b9173..fd99f2197e7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py @@ -320,11 +320,14 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): np.testing.assert_array_equal(value, dy_param_init_value[key]) # np.testing.assert_array_almost_equal(static_out, dy_out) - self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05, atol=1e-4) for key, value in six.iteritems(static_param_value): key = dict_old_new_init[key] - self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4)) + np.testing.assert_allclose(value, + dy_param_value[key], + rtol=1e-05, + atol=1e-4) def test_mnist_train_with_params_filename(self): self.save_dirname = "mnist.inference.model" @@ -347,11 +350,14 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): np.testing.assert_array_equal(value, dy_param_init_value[key]) # np.testing.assert_array_almost_equal(static_out, dy_out) - self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05, atol=1e-4) for key, value in six.iteritems(static_param_value): key = dict_old_new_init[key] - self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-4)) + np.testing.assert_allclose(value, + dy_param_value[key], + rtol=1e-05, + atol=1e-4) def test_mnist_infer_no_params_filename(self): self.save_dirname = "mnist.inference.model.noname" @@ -371,7 +377,7 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): np.testing.assert_array_equal(static_x_data, dy_x_data) np.testing.assert_array_almost_equal(static_out, dy_out) - self.assertTrue(np.allclose(static_out, dy_out, atol=1e-04)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05, atol=1e-4) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py index 1f7ac043d05..a0e144dee34 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py @@ -234,11 +234,14 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): key = dict_old_new_init[key] np.testing.assert_array_equal(value, dy_param_init_value[key]) - self.assertTrue(np.allclose(static_out, dy_out)) + np.testing.assert_allclose(static_out, dy_out, rtol=1e-05) for key, value in six.iteritems(static_param_value): key += LOADED_VAR_SUFFIX - self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5)) + np.testing.assert_allclose(value, + dy_param_value[key], + rtol=1e-05, + atol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index 732de03ff0e..f47191cebf7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -1157,16 +1157,26 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): with _test_eager_guard(): eager_avg_cost_value, eager_sum_cost_value, eager_predict_value, eager_token_num_value, \ eager_param_init, eager_param_updated = run_dygraph() - self.assertTrue(np.allclose(dy_avg_cost_value, eager_avg_cost_value)) - self.assertTrue(np.allclose(dy_sum_cost_value, eager_sum_cost_value)) - - self.assertTrue(np.allclose(dy_predict_value, eager_predict_value)) - self.assertTrue(np.allclose(dy_token_num_value, eager_token_num_value)) + np.testing.assert_allclose(dy_avg_cost_value, + eager_avg_cost_value, + rtol=1e-05) + np.testing.assert_allclose(dy_sum_cost_value, + eager_sum_cost_value, + rtol=1e-05) + + np.testing.assert_allclose(dy_predict_value, + eager_predict_value, + rtol=1e-05) + np.testing.assert_allclose(dy_token_num_value, + eager_token_num_value, + rtol=1e-05) for key, value in six.iteritems(static_param_init): np.testing.assert_array_equal(value, eager_param_init[key]) for key, value in six.iteritems(dy_param_updated): - self.assertTrue(np.allclose(value, eager_param_updated[key])) + np.testing.assert_allclose(value, + eager_param_updated[key], + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py index d3f2009e69d..fab9313048f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py @@ -184,7 +184,7 @@ class TestDygraphTripleGrad(TestCase): dout = np.ones(self.shape).astype('float32') dx_expected = np.matmul(dout * out_np * (1 - out_np), np.transpose(y_np)) - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) ddx_actual, = self.grad([dx_actual], [x], create_graph=True) # Theoritical result based on math calculation @@ -197,7 +197,7 @@ class TestDygraphTripleGrad(TestCase): 1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) ddx_expected = double_grad_tmp1 + np.matmul(double_grad_tmp3, np.transpose(y_np)) - self.assertTrue(np.allclose(ddx_actual.numpy(), ddx_expected)) + np.testing.assert_allclose(ddx_actual.numpy(), ddx_expected, rtol=1e-05) # Theoritical result based on math calculation d_ddout = np.zeros(self.shape).astype('float32') @@ -210,7 +210,7 @@ class TestDygraphTripleGrad(TestCase): ddx_actual.backward() dddx_grad_actual = x.gradient() - self.assertTrue(np.allclose(dddx_grad_actual, dddx_expected)) + np.testing.assert_allclose(dddx_grad_actual, dddx_expected, rtol=1e-05) def test_all_cases(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) @@ -270,7 +270,7 @@ class TestDygraphTripleGradBradcastCase(TestCase): dout = np.ones(self.x_shape).astype('float32') dx_expected = np.matmul(dout * out_np * (1 - out_np), np.transpose(y_np, axes=(0, 2, 1))) - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) ddx_actual, = self.grad([dx_actual], [x], create_graph=True) # Theoritical result based on math calculation @@ -283,7 +283,7 @@ class TestDygraphTripleGradBradcastCase(TestCase): 1 - 2 * out_np) * dout * double_grad_tmp2 * out_np * (1 - out_np) ddx_expected = double_grad_tmp1 + np.matmul( double_grad_tmp3, np.transpose(y_np, axes=(0, 2, 1))) - 
self.assertTrue(np.allclose(ddx_actual.numpy(), ddx_expected)) + np.testing.assert_allclose(ddx_actual.numpy(), ddx_expected, rtol=1e-05) # Theoritical result based on math calculation d_ddout = np.zeros(self.x_shape).astype('float32') @@ -296,7 +296,7 @@ class TestDygraphTripleGradBradcastCase(TestCase): ddx_actual.backward() dddx_grad_actual = x.gradient() - self.assertTrue(np.allclose(dddx_grad_actual, dddx_expected)) + np.testing.assert_allclose(dddx_grad_actual, dddx_expected, rtol=1e-05) def test_all_cases(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_index_select_op.py b/python/paddle/fluid/tests/unittests/test_index_select_op.py index c8bb7890964..e106be89323 100644 --- a/python/paddle/fluid/tests/unittests/test_index_select_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_select_op.py @@ -112,7 +112,7 @@ class TestIndexSelectAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: with program_guard(Program(), Program()): @@ -131,7 +131,7 @@ class TestIndexSelectAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): self.input_data() @@ -143,7 +143,7 @@ class TestIndexSelectAPI(unittest.TestCase): np_z = z.numpy() expect_out = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]]) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: with fluid.dygraph.guard(): @@ -153,7 +153,7 @@ class TestIndexSelectAPI(unittest.TestCase): np_z = z.numpy() expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], [9.0, 10.0, 10.0]]) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index df9236d245b..767441e55a4 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -668,7 +668,9 @@ class TestUniformInitializerDygraph(unittest.TestCase): tensor = paddle.zeros([1024, 1024, 16]) tensor.stop_gradient = False - self.assertTrue(np.allclose(np.zeros((1024, 1024, 16)), tensor.numpy())) + np.testing.assert_allclose(np.zeros((1024, 1024, 16)), + tensor.numpy(), + rtol=1e-05) uniform_ = paddle.nn.initializer.Uniform() uniform_(tensor) @@ -678,8 +680,7 @@ class TestUniformInitializerDygraph(unittest.TestCase): hist, prob = output_hist(tensor.numpy()) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=1e-3), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001) paddle.enable_static() @@ -710,8 +711,7 @@ class TestXavierInitializerDygraph(unittest.TestCase): hist2, _ = output_hist( np.random.normal(0, np.sqrt(2.0 / (3 + 5)), [1024, 1024, 16])) - self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01), - "hist: " + str(hist) + " hist2: " + str(hist2)) + np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) paddle.enable_static() def test_xavier_initializer(self, dtype="float32"): @@ -740,8 
+740,7 @@ class TestMSRAInitializerDygraph(unittest.TestCase):
         hist2, _ = output_hist(
             np.random.normal(0, np.sqrt(2.0 / (4)), [1024, 1024, 16]))
-        self.assertTrue(np.allclose(hist, hist2, rtol=0, atol=0.01),
-                        "hist: " + str(hist) + " hist2: " + str(hist2))
+        np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01)
         paddle.enable_static()
     def test_msra_initializer(self, dtype="float32"):
@@ -820,7 +819,10 @@ class TestOrthogonalInitializer1(unittest.TestCase):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
-        self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
+        np.testing.assert_allclose(np.matmul(a, a.T),
+                                   9 * np.eye(10),
+                                   rtol=1e-5,
+                                   atol=1e-8)
     def func_orthogonal(self):
         self.config()
@@ -879,7 +881,10 @@ class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
-        self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10)))
+        np.testing.assert_allclose(np.matmul(a.T, a),
+                                   4 * np.eye(10),
+                                   rtol=1e-5,
+                                   atol=1e-8)
# 2-D Parameter with shape: [10, 10]
@@ -898,8 +903,14 @@ class TestOrthogonalInitializer3(TestOrthogonalInitializer1):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
-        self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6))
-        self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6))
+        np.testing.assert_allclose(np.matmul(a.T, a),
+                                   np.eye(10),
+                                   rtol=1e-05,
+                                   atol=1e-06)
+        np.testing.assert_allclose(np.matmul(a, a.T),
+                                   np.eye(10),
+                                   rtol=1e-05,
+                                   atol=1e-06)
     def test_error(self):
         self.config()
@@ -924,7 +935,10 @@ class TestOrthogonalInitializer4(unittest.TestCase):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
         a = a.reshape(6, -1)
-        self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
+        np.testing.assert_allclose(np.matmul(a, a.T),
+                                   9 * np.eye(6),
+                                   rtol=1e-5,
+                                   atol=1e-8)
     def func_orthogonal(self):
         self.config()
@@ -975,7 +989,10 @@ class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
         a = a.reshape(50, -1)
-        self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36)))
+        np.testing.assert_allclose(np.matmul(a.T, a),
+                                   4 * np.eye(36),
+                                   rtol=1e-5,
+                                   atol=1e-8)
# 4-D Parameter with shape: [36, 4, 3, 3]
@@ -995,8 +1012,14 @@ class TestOrthogonalInitializer6(TestOrthogonalInitializer4):
     def check_result(self, a, b):
         np.testing.assert_array_equal(a, b)
         a = a.reshape(36, -1)
-        self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6))
-        self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6))
+        np.testing.assert_allclose(np.matmul(a.T, a),
+                                   np.eye(36),
+                                   rtol=1e-05,
+                                   atol=1e-06)
+        np.testing.assert_allclose(np.matmul(a, a.T),
+                                   np.eye(36),
+                                   rtol=1e-05,
+                                   atol=1e-06)
# initialize Conv1D weight
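The orthogonal-initializer hunks above pair rtol with an explicit atol for a reason: the expected matrices (multiples of np.eye) are exactly zero off the diagonal, and with assert_allclose's default atol=0 a purely relative tolerance can never accept any error at a zero entry, since rtol * abs(0) == 0. A standalone sketch, not part of the patch:

import numpy as np

desired = 9 * np.eye(10)
actual = desired + 1e-12  # tiny float noise on every entry

try:
    np.testing.assert_allclose(actual, desired, rtol=1e-05)  # atol defaults to 0
except AssertionError:
    print('zero entries fail under a purely relative tolerance')
np.testing.assert_allclose(actual, desired, rtol=1e-05, atol=1e-08)  # passes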
diff --git a/python/paddle/fluid/tests/unittests/test_inner.py b/python/paddle/fluid/tests/unittests/test_inner.py
index 8a412d8138f..98faaf8d93f 100644
--- a/python/paddle/fluid/tests/unittests/test_inner.py
+++ b/python/paddle/fluid/tests/unittests/test_inner.py
@@ -61,37 +61,37 @@ class TestMultiplyApi(unittest.TestCase):
         x_data = np.random.rand(2, 10, 10).astype(np.float64)
         y_data = np.random.rand(2, 5, 10).astype(np.float64)
         res = self._run_static_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test static computation graph: 2-d array
         x_data = np.random.rand(200, 5).astype(np.float64)
         y_data = np.random.rand(50, 5).astype(np.float64)
         res = self._run_static_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test static computation graph: 1-d array
         x_data = np.random.rand(50).astype(np.float64)
         y_data = np.random.rand(50).astype(np.float64)
         res = self._run_static_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test dynamic computation graph: 3-d array
         x_data = np.random.rand(5, 10, 10).astype(np.float64)
         y_data = np.random.rand(2, 10).astype(np.float64)
         res = self._run_dynamic_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test dynamic computation graph: 2-d array
         x_data = np.random.rand(20, 50).astype(np.float64)
         y_data = np.random.rand(50).astype(np.float64)
         res = self._run_dynamic_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test dynamic computation graph: Scalar
         x_data = np.random.rand(20, 10).astype(np.float32)
         y_data = np.random.rand(1).astype(np.float32).item()
         res = self._run_dynamic_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test dynamic computation graph: 2-d array Complex
         x_data = np.random.rand(20, 50).astype(
@@ -99,7 +99,7 @@ class TestMultiplyApi(unittest.TestCase):
         y_data = np.random.rand(50).astype(
             np.float64) + 1J * np.random.rand(50).astype(np.float64)
         res = self._run_dynamic_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
         # test dynamic computation graph: 3-d array Complex
         x_data = np.random.rand(5, 10, 10).astype(
@@ -107,7 +107,7 @@ class TestMultiplyApi(unittest.TestCase):
         y_data = np.random.rand(2, 10).astype(
             np.float64) + 1J * np.random.rand(2, 10).astype(np.float64)
         res = self._run_dynamic_graph_case(x_data, y_data)
-        self.assertTrue(np.allclose(res, np.inner(x_data, y_data)))
+        np.testing.assert_allclose(res, np.inner(x_data, y_data), rtol=1e-05)
     def test_multiply(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
index dc0b45206d9..425811d0ed8 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py
@@ -146,12 +146,15 @@ class TestInplaceANBOpTraining(unittest.TestCase):
         for bn_val, inplace_abn_val, name1, name2 in zip(*(fetch_outs +
                                                            fetch_names)):
-            self.assertTrue(
-                np.allclose(bn_val, inplace_abn_val,
-                            atol=1e-2), "Output (" + name1 + ":" + name2 +
-                ") has diff on {} with {} layout and {} activation. \n".format(
-                    place, layout, activation) + "\nBN " + str(bn_val) +
-                "\n" + "Inplace ABN " + str(inplace_abn_val))
+            np.testing.assert_allclose(
+                bn_val,
+                inplace_abn_val,
+                rtol=1e-05,
+                atol=0.01,
+                err_msg='Output (' + name1 + ':' + name2 +
+                ') has diff on {} with {} layout and {} activation. 
\n'.format( + place, layout, activation) + '\nBN ' + str(bn_val) + + '\n' + 'Inplace ABN ' + str(inplace_abn_val)) def test_op(self): use_cudas = [False, True] if core.is_compiled_with_cuda() else [False] diff --git a/python/paddle/fluid/tests/unittests/test_input_spec.py b/python/paddle/fluid/tests/unittests/test_input_spec.py index a076b69cc00..79f8bd1ebde 100644 --- a/python/paddle/fluid/tests/unittests/test_input_spec.py +++ b/python/paddle/fluid/tests/unittests/test_input_spec.py @@ -202,13 +202,13 @@ class TestNetWithNonTensorSpec(unittest.TestCase): load_net.eval() pred_out = load_net(self.x) - self.assertTrue(np.allclose(dy_out, pred_out)) + np.testing.assert_allclose(dy_out, pred_out, rtol=1e-05) # @to_static by InputSpec net = paddle.jit.to_static(net, input_spec=specs) st_out = net(self.x, *specs[1:]) - self.assertTrue(np.allclose(dy_out, st_out)) + np.testing.assert_allclose(dy_out, st_out, rtol=1e-05) # jit.save and jit.load paddle.jit.save(net, path) @@ -216,7 +216,7 @@ class TestNetWithNonTensorSpec(unittest.TestCase): load_net.eval() load_out = load_net(self.x) - self.assertTrue(np.allclose(st_out, load_out)) + np.testing.assert_allclose(st_out, load_out, rtol=1e-05) def test_spec_compatible(self): net = NetWithNonTensorSpec(self.in_num, self.out_num) @@ -239,7 +239,7 @@ class TestNetWithNonTensorSpec(unittest.TestCase): load_net.eval() pred_out = load_net(self.x) - self.assertTrue(np.allclose(dy_out, pred_out)) + np.testing.assert_allclose(dy_out, pred_out, rtol=1e-05) class NetWithNonTensorSpecPrune(paddle.nn.Layer): @@ -292,13 +292,13 @@ class TestNetWithNonTensorSpecWithPrune(unittest.TestCase): load_net.eval() pred_out, _ = load_net(self.x, self.y) - self.assertTrue(np.allclose(dy_out, pred_out)) + np.testing.assert_allclose(dy_out, pred_out, rtol=1e-05) # @to_static by InputSpec net = paddle.jit.to_static(net, input_spec=specs) st_out, _ = net(self.x, self.y, *specs[2:]) - self.assertTrue(np.allclose(dy_out, st_out)) + np.testing.assert_allclose(dy_out, st_out, rtol=1e-05) # jit.save and jit.load with prune y and loss prune_specs = [self.x_spec, True] @@ -307,7 +307,7 @@ class TestNetWithNonTensorSpecWithPrune(unittest.TestCase): load_net.eval() load_out = load_net(self.x) # no y and no loss - self.assertTrue(np.allclose(st_out, load_out)) + np.testing.assert_allclose(st_out, load_out, rtol=1e-05) class UnHashableObject: diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py index f932df9dd33..8308392555c 100644 --- a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py @@ -93,7 +93,11 @@ class TestInstanceNormOpTraining(unittest.TestCase): ] def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.testing.assert_allclose(np.array(tensor), + np_array, + rtol=1e-05, + atol=atol, + err_msg=msg) def set_global_mean_var(self, mean_shape, x): mean, variance = _cal_mean_variance(x, self.epsilon, mean_shape) @@ -267,7 +271,10 @@ class TestElasticNormOp(unittest.TestCase): param_attr=False, bias_attr=False) outputs = instance_norm(to_variable(inputs)) - self.assertTrue(np.allclose(outputs.numpy(), out_np, atol=1e-6)) + np.testing.assert_allclose(outputs.numpy(), + out_np, + rtol=1e-05, + atol=1e-06) def test_eager_api(self): with _test_eager_guard(): @@ -303,7 +310,10 @@ class TestElasticNormOpCase2(unittest.TestCase): param_attr=True, 
bias_attr=True)
         outputs = instance_norm(to_variable(inputs))
-        self.assertTrue(np.allclose(outputs.numpy(), out_np, atol=1e-6))
+        np.testing.assert_allclose(outputs.numpy(),
+                                   out_np,
+                                   rtol=1e-05,
+                                   atol=1e-06)
     def test_eager_api(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
index 62677ed2621..28560cccec1 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
@@ -85,7 +85,7 @@ class TestInstanceNorm(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute_v1(x)
             y2 = compute_v2(x)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2, rtol=1e-05)
     def test_static(self):
         places = [fluid.CPUPlace()]
@@ -117,7 +117,7 @@ class TestInstanceNorm(unittest.TestCase):
             x = np.random.randn(*shape).astype("float32")
             y1 = compute_v1(x)
             y2 = compute_v2(x)
-            self.assertTrue(np.allclose(y1, y2))
+            np.testing.assert_allclose(y1, y2, rtol=1e-05)
     def test_eager_api(self):
         with _test_eager_guard():
diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py
index d39707f1042..ec0a94c3c04 100644
--- a/python/paddle/fluid/tests/unittests/test_inverse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py
@@ -116,7 +116,9 @@ class TestInverseAPI(unittest.TestCase):
             fetches = exe.run(fluid.default_main_program(),
                               feed={"input": input_np},
                               fetch_list=[result])
-            self.assertTrue(np.allclose(fetches[0], np.linalg.inv(input_np)))
+            np.testing.assert_allclose(fetches[0],
+                                       np.linalg.inv(input_np),
+                                       rtol=1e-05)
     def test_static(self):
         for place in self.places:
@@ -128,8 +130,9 @@ class TestInverseAPI(unittest.TestCase):
                 input_np = np.random.random([4, 4]).astype("float64")
                 input = fluid.dygraph.to_variable(input_np)
                 result = paddle.inverse(input)
-                self.assertTrue(
-                    np.allclose(result.numpy(), np.linalg.inv(input_np)))
+                np.testing.assert_allclose(result.numpy(),
+                                           np.linalg.inv(input_np),
+                                           rtol=1e-05)
class TestInverseAPIError(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py
index fa3adfb9e99..d8df6d37d03 100644
--- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py
@@ -116,14 +116,14 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase):
         print(ret1)
         ret2 = self.check_network_convergence(False, False)
         print(ret2)
-        self.assertTrue(np.allclose(ret1, ret2))
+        np.testing.assert_allclose(ret1, ret2, rtol=1e-05)
         if fluid.core.is_compiled_with_cuda():
             ret1 = self.check_network_convergence(True, True)
             print(ret1)
             ret2 = self.check_network_convergence(True, False)
             print(ret2)
-            self.assertTrue(np.allclose(ret1, ret2))
+            np.testing.assert_allclose(ret1, ret2, rtol=1e-05)
if __name__ == "__main__":
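A side benefit visible in the jit-layer hunk below, which previously asserted assertEqual(np.allclose(...), True): on failure that form reports nothing more informative than "False != True", whereas assert_allclose raises an AssertionError summarizing the mismatch (mismatched element count, max absolute and max relative difference), with err_msg available for extra context. A standalone sketch, not part of the patch:

import numpy as np

actual = np.array([1.0, 2.0, 3.5])
desired = np.array([1.0, 2.0, 3.0])

try:
    np.testing.assert_allclose(actual, desired, rtol=1e-05,
                               err_msg='loaded output drifted')
except AssertionError as e:
    print(e)  # shows which elements differ and by how much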
diff --git a/python/paddle/fluid/tests/unittests/test_jit_layer.py b/python/paddle/fluid/tests/unittests/test_jit_layer.py
index fd77aa59988..bc5658127b2 100644
--- a/python/paddle/fluid/tests/unittests/test_jit_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_jit_layer.py
@@ -71,8 +71,8 @@ class TestMultiLoad(unittest.TestCase):
         jit_layer.load(model_path, place)
         forward_out2 = jit_layer.forward(x)
         infer_out2 = jit_layer.infer(x)
-        self.assertEqual(np.allclose(forward_out1, forward_out2[0]), True)
-        self.assertEqual(np.allclose(infer_out1, infer_out2[0]), True)
+        np.testing.assert_allclose(forward_out1, forward_out2[0], rtol=1e-05)
+        np.testing.assert_allclose(infer_out1, infer_out2[0], rtol=1e-05)
         self.temp_dir.cleanup()
diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py
index ab6dea3940e..507083755c0 100644
--- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py
@@ -457,7 +457,9 @@ class TestSaveLoadWithNestOut(unittest.TestCase):
         self.assertTrue(len(dy_outs) == 4)
         for dy_out, load_out in zip(dy_outs, load_outs):
-            self.assertTrue(np.allclose(dy_out.numpy(), load_out.numpy()))
+            np.testing.assert_allclose(dy_out.numpy(),
+                                       load_out.numpy(),
+                                       rtol=1e-05)
class TestSaveLoadWithDictInput(unittest.TestCase):
@@ -595,7 +597,7 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
         pred_xx = infer_layer2(x)
         # 4. assert pred_x == pred_xx
-        self.assertTrue(np.allclose(pred_x.numpy(), pred_xx.numpy()))
+        np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
     def test_multi_in_out1(self):
         net = LinearNetMultiInput1(8, 8)
@@ -633,7 +635,7 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
         pred_xx = infer_layer2(x)
         # 4. assert pred_x == pred_xx
-        self.assertTrue(np.allclose(pred_x.numpy(), pred_xx.numpy()))
+        np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
class TestJitSaveLoadConfig(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
index 930d8666ba1..0e38224806c 100644
--- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
@@ -101,7 +101,7 @@ class TestKLDivLossDygraph(unittest.TestCase):
             kldiv_criterion = paddle.nn.KLDivLoss(reduction)
             pred_loss = kldiv_criterion(paddle.to_tensor(x),
                                         paddle.to_tensor(target))
-            self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))
+            np.testing.assert_allclose(pred_loss.numpy(), gt_loss, rtol=1e-05)
     def test_kl_loss_batchmean(self):
         self.run_kl_loss('batchmean')
diff --git a/python/paddle/fluid/tests/unittests/test_kthvalue_op.py b/python/paddle/fluid/tests/unittests/test_kthvalue_op.py
index 66eb8ab4f31..ab3337fe55f 100644
--- a/python/paddle/fluid/tests/unittests/test_kthvalue_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kthvalue_op.py
@@ -108,8 +108,10 @@ class TestKthvalueOpKernels(unittest.TestCase):
             for axis in self.axises:
                 value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
                 v, inds = paddle.kthvalue(tensor, k, axis)
-                self.assertTrue(np.allclose(v.numpy(), value_expect))
-                self.assertTrue(np.allclose(inds.numpy(), indice_expect))
+                np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)
+                np.testing.assert_allclose(inds.numpy(),
+                                           indice_expect,
+                                           rtol=1e-05)
         def test_gpu_kernel():
             shape = (2, 30, 250)
             for axis in self.axises:
                 value_expect, indice_expect = cal_kthvalue(inputs, k, axis)
                 v, inds = paddle.kthvalue(tensor, k, axis)
-                self.assertTrue(np.allclose(v.numpy(), value_expect))
-                self.assertTrue(np.allclose(inds.numpy(), indice_expect))
+                np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05)
+                np.testing.assert_allclose(inds.numpy(),
+                                           indice_expect,
+                                           rtol=1e-05)
         test_cpu_kernel()
         if
fluid.core.is_compiled_with_cuda(): @@ -200,7 +204,7 @@ class TestModeOpInStatic(unittest.TestCase): exe = paddle.static.Executor(paddle.CPUPlace()) paddle_result = exe.run(feed={"x": self.input_data}, fetch_list=[result])[0] - self.assertTrue(np.allclose(paddle_result, expect_value)) + np.testing.assert_allclose(paddle_result, expect_value, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py index 01d9dba7b42..0e8221d1b4c 100644 --- a/python/paddle/fluid/tests/unittests/test_l1_loss.py +++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py @@ -31,17 +31,17 @@ class TestFunctionalL1Loss(unittest.TestCase): label = paddle.to_tensor(self.label_np) dy_result = paddle.nn.functional.l1_loss(input, label) expected = np.mean(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.l1_loss(input, label, reduction='sum') expected = np.sum(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) dy_result = paddle.nn.functional.l1_loss(input, label, reduction='none') expected = np.abs(self.input_np - self.label_np) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [10, 10, 5]) def run_static(self, use_gpu=False): @@ -66,11 +66,11 @@ class TestFunctionalL1Loss(unittest.TestCase): fetch_list=[result0, result1, result2]) expected = np.mean(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(static_result[0], expected)) + np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) expected = np.sum(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(static_result[1], expected)) + np.testing.assert_allclose(static_result[1], expected, rtol=1e-05) expected = np.abs(self.input_np - self.label_np) - self.assertTrue(np.allclose(static_result[2], expected)) + np.testing.assert_allclose(static_result[2], expected, rtol=1e-05) self.assertTrue('aaa' in y.name) @@ -122,19 +122,19 @@ class TestClassL1Loss(unittest.TestCase): l1_loss = paddle.nn.loss.L1Loss() dy_result = l1_loss(input, label) expected = np.mean(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) l1_loss = paddle.nn.loss.L1Loss(reduction='sum') dy_result = l1_loss(input, label) expected = np.sum(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [1]) l1_loss = paddle.nn.loss.L1Loss(reduction='none') dy_result = l1_loss(input, label) expected = np.abs(self.input_np - self.label_np) - self.assertTrue(np.allclose(dy_result.numpy(), expected)) + np.testing.assert_allclose(dy_result.numpy(), expected, rtol=1e-05) self.assertTrue(dy_result.shape, [10, 10, 5]) def run_static(self, use_gpu=False): @@ -163,11 +163,11 @@ class TestClassL1Loss(unittest.TestCase): fetch_list=[result0, result1, result2]) expected = np.mean(np.abs(self.input_np - self.label_np)) - 
self.assertTrue(np.allclose(static_result[0], expected)) + np.testing.assert_allclose(static_result[0], expected, rtol=1e-05) expected = np.sum(np.abs(self.input_np - self.label_np)) - self.assertTrue(np.allclose(static_result[1], expected)) + np.testing.assert_allclose(static_result[1], expected, rtol=1e-05) expected = np.abs(self.input_np - self.label_np) - self.assertTrue(np.allclose(static_result[2], expected)) + np.testing.assert_allclose(static_result[2], expected, rtol=1e-05) self.assertTrue('aaa' in result3.name) def test_cpu(self): diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index 54f84a1bb9b..32c8ada978b 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -167,7 +167,7 @@ class TestLambOpWithCombinedOp(unittest.TestCase): }, fetch_list=[loss.name]) - self.assertTrue(np.allclose(out, output)) + np.testing.assert_allclose(out, output, rtol=1e-05) class TestLambOpV2Group(TestLambOpV2): diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 1bae5b75210..6ddf9074895 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -429,7 +429,7 @@ class TestBF16ScaleBiasLayerNorm(unittest.TestCase): x_np, weight_np, bias_np, 'bfloat16') def assert_equal(x, y): - self.assertTrue(np.allclose(x, y, atol=1.e-1)) + np.testing.assert_allclose(x, y, rtol=1e-05, atol=0.1) assert_equal(y_np_1, y_np_2) assert_equal(x_g_np_1, x_g_np_2) diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py index 0242df213f2..62a0f00eba3 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py @@ -49,7 +49,7 @@ class TestDygraphLayerNormv2(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute_v1(x) y2 = compute_v2(x) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_eager(self): places = [fluid.CPUPlace()] @@ -80,8 +80,8 @@ class TestDygraphLayerNormv2(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1, g1 = compute_v1(x) y2, g2 = compute_v2(x) - self.assertTrue(np.allclose(y1, y2)) - self.assertTrue(np.allclose(g1, g2)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) + np.testing.assert_allclose(g1, g2, rtol=1e-05) def test_static(self): paddle.enable_static() @@ -113,7 +113,7 @@ class TestDygraphLayerNormv2(unittest.TestCase): x = np.random.randn(*shape).astype("float32") y1 = compute_v1(x) y2 = compute_v2(x) - self.assertTrue(np.allclose(y1, y2)) + np.testing.assert_allclose(y1, y2, rtol=1e-05) class TestLayerNormFunction(unittest.TestCase): @@ -159,11 +159,11 @@ class TestLayerNormFunction(unittest.TestCase): y0 = compute_v0(x) y1 = compute_v1(x) y2 = compute_v2(x) - self.assertTrue(np.allclose(y0, y1)) - self.assertTrue(np.allclose(y0, y2)) + np.testing.assert_allclose(y0, y1, rtol=1e-05) + np.testing.assert_allclose(y0, y2, rtol=1e-05) y3 = compute_v3(x) y4 = compute_v4(x) - self.assertTrue(np.allclose(y3, y4)) + np.testing.assert_allclose(y3, y4, rtol=1e-05) self.assertRaises(ValueError, paddle.nn.functional.layer_norm, diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 
20bc86646f7..92023f53624 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -400,8 +400,8 @@ class TestLayer(LayerTest): dy_ret = layers.relu(base.to_variable(t)) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_ret_value)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05) def test_matmul(self): with self.static_graph(): @@ -429,8 +429,8 @@ class TestLayer(LayerTest): dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2)) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_ret_value)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05) def test_conv2d(self): with self.static_graph(): @@ -508,9 +508,9 @@ class TestLayer(LayerTest): self.assertRaises(TypeError, test_type) - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_ret_value)) - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -637,9 +637,15 @@ class TestLayer(LayerTest): dy_ret_value.append(dy_ret[i].numpy()) for i in range(len(static_ret)): - self.assertTrue(np.allclose(static_ret[i], static_ret2[i])) - self.assertTrue(np.allclose(static_ret[i], dy_ret_value[i])) - self.assertTrue(np.allclose(static_ret[i], dy_eager_ret_value[i])) + np.testing.assert_allclose(static_ret[i], + static_ret2[i], + rtol=1e-05) + np.testing.assert_allclose(static_ret[i], + dy_ret_value[i], + rtol=1e-05) + np.testing.assert_allclose(static_ret[i], + dy_eager_ret_value[i], + rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -749,8 +755,8 @@ class TestLayer(LayerTest): dy_ret = layers.elementwise_mul(ret, to_variable(n6)) dy_ret_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_ret_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_ret_value)) + np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_ret_value, rtol=1e-05) def test_elementwise_minmax(self): n = np.ones([3, 3], dtype='float32') @@ -770,10 +776,10 @@ class TestLayer(LayerTest): min_ret_value = min_ret.numpy() max_ret_value = max_ret.numpy() - self.assertTrue(np.allclose(n, min_ret_value)) - self.assertTrue(np.allclose(n2, max_ret_value)) - self.assertTrue(np.allclose(n, min_eager_ret_value)) - self.assertTrue(np.allclose(n2, max_eager_ret_value)) + np.testing.assert_allclose(n, min_ret_value, rtol=1e-05) + np.testing.assert_allclose(n2, max_ret_value, rtol=1e-05) + np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05) + np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05) def test_sequence_conv(self): inp_np = np.arange(12).reshape([3, 4]).astype('float32') @@ -858,9 +864,9 @@ class TestLayer(LayerTest): bias_attr=fluid.initializer.ConstantInitializer(value=1)) dy_rlt = conv2d_transpose(base.to_variable(inp_np)) dy_rlt_value = dy_rlt.numpy() - self.assertTrue(np.allclose(static_rlt2, static_rlt)) - 
self.assertTrue(np.allclose(dy_rlt_value, static_rlt2)) - self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt2)) + np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05) + np.testing.assert_allclose(dy_eager_rlt_value, static_rlt2, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -1169,9 +1175,9 @@ class TestLayer(LayerTest): dy_rlt = prelu(base.to_variable(inp_np)) dy_rlt_value = dy_rlt.numpy() - self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt_value, static_rlt)) - self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt)) + np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -1474,9 +1480,9 @@ class TestLayer(LayerTest): dy_rlt = nce(embs3, wl) dy_rlt_value = dy_rlt.numpy() - self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt_value, static_rlt)) - self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt)) + np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -1696,9 +1702,9 @@ class TestLayer(LayerTest): dy_ret = conv3d(base.to_variable(images)) dy_rlt_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_rlt_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_rlt_value)) - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -1812,7 +1818,7 @@ class TestLayer(LayerTest): # TODO: dygraph can't support LODTensor - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) def func_group_norm(self): if core.is_compiled_with_cuda(): @@ -1874,8 +1880,8 @@ class TestLayer(LayerTest): dy_ret = groupNorm(base.to_variable(input)) dy_rlt_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_rlt_value)) - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) def test_group_norm(self): with _test_eager_guard(): @@ -1931,11 +1937,11 @@ class TestLayer(LayerTest): dy_ret = instanceNorm(base.to_variable(input)) dy_rlt_value2 = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_rlt_value)) - self.assertTrue(np.allclose(static_ret, dy_rlt_value2)) - self.assertTrue(np.allclose(static_ret, dy_eager_rlt_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_rlt_value2)) - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_rlt_value2, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_rlt_value2, rtol=1e-05) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) with self.static_graph(): # the input of 
InstanceNorm must be Variable. @@ -2006,9 +2012,9 @@ class TestLayer(LayerTest): dy_ret = spectralNorm(base.to_variable(input)) dy_rlt_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, dy_rlt_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_rlt_value)) - self.assertTrue(np.allclose(static_ret, static_ret2)) + np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) def test_tree_conv(self): if core.is_compiled_with_cuda(): @@ -2094,9 +2100,9 @@ class TestLayer(LayerTest): dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj)) dy_rlt_value = dy_ret.numpy() - self.assertTrue(np.allclose(static_ret, static_ret2)) - self.assertTrue(np.allclose(static_ret, dy_rlt_value)) - self.assertTrue(np.allclose(static_ret, dy_eager_rlt_value)) + np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) + np.testing.assert_allclose(static_ret, dy_eager_rlt_value, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -2206,9 +2212,9 @@ class TestLayer(LayerTest): use_cudnn=False) dy_rlt = conv3d_transpose(base.to_variable(input_array)) dy_rlt_value = dy_rlt.numpy() - self.assertTrue(np.allclose(static_rlt2, static_rlt)) - self.assertTrue(np.allclose(dy_rlt_value, static_rlt)) - self.assertTrue(np.allclose(dy_eager_rlt_value, static_rlt)) + np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05) + np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05) with self.dynamic_graph(): with _test_eager_guard(): @@ -2325,15 +2331,25 @@ class TestLayer(LayerTest): eye_tensor_rlt2_value = eye_tensor_rlt2.numpy() diag_tensor_value = diag_tensor.numpy() - self.assertTrue(np.allclose(eager_eye_tensor_value, np_eye)) - self.assertTrue(np.allclose(eager_eye_tensor_rlt1_value, stack_rlt1)) - self.assertTrue(np.allclose(eager_eye_tensor_rlt2_value, stack_rlt2)) - self.assertTrue(np.allclose(eager_diag_tensor_value, np.eye(20))) - - self.assertTrue(np.allclose(eye_tensor_value, np_eye)) - self.assertTrue(np.allclose(eye_tensor_rlt1_value, stack_rlt1)) - self.assertTrue(np.allclose(eye_tensor_rlt2_value, stack_rlt2)) - self.assertTrue(np.allclose(diag_tensor_value, np.eye(20))) + np.testing.assert_allclose(eager_eye_tensor_value, np_eye, rtol=1e-05) + np.testing.assert_allclose(eager_eye_tensor_rlt1_value, + stack_rlt1, + rtol=1e-05) + np.testing.assert_allclose(eager_eye_tensor_rlt2_value, + stack_rlt2, + rtol=1e-05) + np.testing.assert_allclose(eager_diag_tensor_value, + np.eye(20), + rtol=1e-05) + + np.testing.assert_allclose(eye_tensor_value, np_eye, rtol=1e-05) + np.testing.assert_allclose(eye_tensor_rlt1_value, + stack_rlt1, + rtol=1e-05) + np.testing.assert_allclose(eye_tensor_rlt2_value, + stack_rlt2, + rtol=1e-05) + np.testing.assert_allclose(diag_tensor_value, np.eye(20), rtol=1e-05) with self.assertRaises(TypeError): layers.eye(num_rows=3.1) @@ -2878,12 +2894,12 @@ class TestBook(LayerTest): dy_result_value = dy_result.numpy() if method.__name__ in self.all_close_compare: - self.assertTrue( - np.allclose(static_result[0], - dy_result_value, - atol=0, - rtol=1e-05), - "Result of function [{}] compare failed".format( + np.testing.assert_allclose( + static_result[0], + dy_result_value, + rtol=1e-05, + atol=0, + err_msg='Result of function [{}] compare 
failed'.format( method.__name__)) continue diff --git a/python/paddle/fluid/tests/unittests/test_lbfgs.py b/python/paddle/fluid/tests/unittests/test_lbfgs.py index d4875bce503..36b01d05e78 100644 --- a/python/paddle/fluid/tests/unittests/test_lbfgs.py +++ b/python/paddle/fluid/tests/unittests/test_lbfgs.py @@ -88,10 +88,10 @@ class TestLbfgs(unittest.TestCase): x0 = np.random.random(size=[dimension]).astype('float32') results = test_static_graph(func, x0) - self.assertTrue(np.allclose(minimum, results[2])) + np.testing.assert_allclose(minimum, results[2], rtol=1e-05) results = test_dynamic_graph(func, x0) - self.assertTrue(np.allclose(minimum, results[2].numpy())) + np.testing.assert_allclose(minimum, results[2].numpy(), rtol=1e-05) def test_inf_minima(self): extream_point = np.array([-1, 2]).astype('float32') @@ -119,7 +119,7 @@ class TestLbfgs(unittest.TestCase): x0 = np.array([0.82], dtype='float64') results = test_static_graph(func, x0, dtype='float64') - self.assertTrue(np.allclose(0.8, results[2])) + np.testing.assert_allclose(0.8, results[2], rtol=1e-05) def test_rosenbrock(self): # The Rosenbrock function is a standard optimization test case. @@ -138,7 +138,7 @@ class TestLbfgs(unittest.TestCase): x0 = np.random.random(size=[2]).astype('float32') results = test_dynamic_graph(func, x0) - self.assertTrue(np.allclose(minimum, results[2])) + np.testing.assert_allclose(minimum, results[2], rtol=1e-05) def test_exception(self): @@ -158,7 +158,7 @@ class TestLbfgs(unittest.TestCase): # test initial_inverse_hessian_estimate is good results = test_static_graph_H0(func, x0, H0, dtype='float32') - self.assertTrue(np.allclose([0., 0.], results[2])) + np.testing.assert_allclose([0.0, 0.0], results[2], rtol=1e-05) self.assertTrue(results[0][0]) # test initial_inverse_hessian_estimate is bad and float64 diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py index ca78e239da4..cbfaac5a88c 100644 --- a/python/paddle/fluid/tests/unittests/test_lcm.py +++ b/python/paddle/fluid/tests/unittests/test_lcm.py @@ -58,8 +58,9 @@ class TestLcmAPI(unittest.TestCase): x1 = paddle.to_tensor(self.x_np) x2 = paddle.to_tensor(self.y_np) result = paddle.lcm(x1, x2) - self.assertEqual( - np.allclose(np.lcm(self.x_np, self.y_np), result.numpy()), True) + np.testing.assert_allclose(np.lcm(self.x_np, self.y_np), + result.numpy(), + rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py index b70acce3235..5c4bf7a73fb 100644 --- a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py @@ -253,8 +253,9 @@ class TestLearningRateDecayDygraph(unittest.TestCase): t = lr() - self.assertTrue( - np.allclose((t.numpy())[0].item(), right_result[i])) + np.testing.assert_allclose(t.numpy()[0].item(), + right_result[i], + rtol=1e-05) with self.assertRaises(TypeError): lr = fluid.layers.linear_lr_warmup(learning_rate="fake_lr", diff --git a/python/paddle/fluid/tests/unittests/test_lerp_op.py b/python/paddle/fluid/tests/unittests/test_lerp_op.py index 0af6e46c73d..43b1e1f45b5 100644 --- a/python/paddle/fluid/tests/unittests/test_lerp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lerp_op.py @@ -109,7 +109,7 @@ class TestLerpAPI(unittest.TestCase): 'y': self.y.reshape([1, 4]), }) for r in res: - self.assertEqual(np.allclose(self.res_ref, r), True) + 
self.assertEqual(np.allclose(self.res_ref, r), True) +
np.testing.assert_allclose(self.res_ref, r, rtol=1e-05) for place in self.place: run(place) @@ -122,7 +122,7 @@ class TestLerpAPI(unittest.TestCase): y = paddle.to_tensor(self.y) w = paddle.to_tensor(np.full(4, 0.75).astype(self.dtype)) out = paddle.lerp(x, y, w) - self.assertEqual(np.allclose(self.res_ref, out.numpy()), True) + np.testing.assert_allclose(self.res_ref, out.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: @@ -135,7 +135,7 @@ class TestLerpAPI(unittest.TestCase): x = paddle.to_tensor(self.x) y = paddle.to_tensor(self.y) x.lerp_(y, 0.75) - self.assertEqual(np.allclose(self.res_ref, x.numpy()), True) + np.testing.assert_allclose(self.res_ref, x.numpy(), rtol=1e-05) paddle.enable_static() for place in self.place: @@ -161,7 +161,7 @@ class TestLerpAPI(unittest.TestCase): y = np.full(30, 10.).astype(self.dtype).reshape([3, 2, 1, 5]) out = paddle.lerp(paddle.to_tensor(x), paddle.to_tensor(y), 0.5) res_ref = x + 0.5 * (y - x) - self.assertEqual(np.allclose(res_ref, out.numpy()), True) + np.testing.assert_allclose(res_ref, out.numpy(), rtol=1e-05) paddle.enable_static() def test_x_y_broadcast_w(self): @@ -172,7 +172,7 @@ class TestLerpAPI(unittest.TestCase): out = paddle.lerp(paddle.to_tensor(x), paddle.to_tensor(y), paddle.to_tensor(w)) res_ref = x + w * (y - x) - self.assertEqual(np.allclose(res_ref, out.numpy()), True) + np.testing.assert_allclose(res_ref, out.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_lgamma_op.py b/python/paddle/fluid/tests/unittests/test_lgamma_op.py index 7d89107f575..13e420abad3 100644 --- a/python/paddle/fluid/tests/unittests/test_lgamma_op.py +++ b/python/paddle/fluid/tests/unittests/test_lgamma_op.py @@ -69,7 +69,7 @@ class TestLgammaOpApi(unittest.TestCase): data_ = paddle.to_tensor(data) out = paddle.fluid.layers.lgamma(data_) result = special.gammaln(data) - self.assertTrue(np.allclose(result, out.numpy())) + np.testing.assert_allclose(result, out.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_linear.py b/python/paddle/fluid/tests/unittests/test_linear.py index b03b8866eaf..8971fd16260 100644 --- a/python/paddle/fluid/tests/unittests/test_linear.py +++ b/python/paddle/fluid/tests/unittests/test_linear.py @@ -87,12 +87,12 @@ class LinearTestCase(unittest.TestCase): paddle.nn.utils._stride_column(linear.weight) expect = [[1.4349908, -0.8099171, -2.64788], [-1.4981681, -1.1784115, -0.023253186]] - self.assertTrue(np.allclose(linear.weight.numpy(), expect)) + np.testing.assert_allclose(linear.weight.numpy(), expect, rtol=1e-05) linear = paddle.nn.Linear(2, 3) expect = [[0.73261100, 0.43836895, 0.07908206], [0.85075015, -1.04724526, 0.64371765]] - self.assertTrue(np.allclose(linear.weight.numpy(), expect)) + np.testing.assert_allclose(linear.weight.numpy(), expect, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py index dd44e70d92e..ece42157f5b 100755 --- a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py @@ -316,7 +316,7 @@ class TestResizeLinearAPI(unittest.TestCase): align_mode=1, align_corners=False) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestLinearInterpOpAPI2_0(unittest.TestCase): @@ -341,7 +341,7 @@ class 
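A caveat for conversions where one side is expected to be at or near zero, as in the test_lbfgs hunk above (a hedged illustration, not from the patch): assert_allclose defaults to atol=0, so the rtol term alone permits almost no error around zero, and an explicit atol may be needed to keep the old np.allclose behavior.

import numpy as np

tiny = np.array([1e-9, -1e-9])

assert np.allclose(tiny, 0.0)  # passes: np.allclose uses atol=1e-08 by default
np.testing.assert_allclose(tiny, 0.0, rtol=1e-05, atol=1e-08)  # also passes
# with atol omitted the allowed error is atol + rtol * |desired| = 0 + 1e-05 * 0,
# so the same comparison raises AssertionError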
TestLinearInterpOpAPI2_0(unittest.TestCase): align_mode=1, align_corners=False) - self.assertTrue(np.allclose(interp.numpy(), expect)) + np.testing.assert_allclose(interp.numpy(), expect, rtol=1e-05) class TestResizeLinearOpUint8(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py index f69a1b47f11..82ed2b83a0d 100755 --- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py @@ -368,7 +368,7 @@ class TestResizeLinearAPI(unittest.TestCase): align_mode=1, align_corners=False) for res in results: - self.assertTrue(np.allclose(res, expect_res)) + np.testing.assert_allclose(res, expect_res, rtol=1e-05) class TestLinearInterpOpAPI2_0(unittest.TestCase): @@ -393,7 +393,7 @@ class TestLinearInterpOpAPI2_0(unittest.TestCase): align_mode=1, align_corners=False) - self.assertTrue(np.allclose(interp.numpy(), expect)) + np.testing.assert_allclose(interp.numpy(), expect, rtol=1e-05) class TestResizeLinearOpUint8(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index 9843410bf76..9ccd7842790 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -17,7 +17,7 @@ from __future__ import print_function import unittest import paddle import paddle.fluid.core as core -import numpy +import numpy as np import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard from paddle.fluid.executor import Executor @@ -36,11 +36,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_lod_tensor_to_array_level_0(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) + tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths([[3, 6, 1]]) expect = [ - numpy.array(x).astype('int32') + np.array(x).astype('int32') for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] ] self.main(tensor=tensor, @@ -50,11 +49,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_lod_tensor_to_array_level_0_empty_seq(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) + tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths([[3, 6, 0, 1]]) expect = [ - numpy.array(x).astype('int32') + np.array(x).astype('int32') for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] ] self.main(tensor=tensor, @@ -64,15 +62,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_lod_tensor_to_array_level_1(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(20).reshape(20, 1).astype('int32'), self.place()) + tensor.set(np.arange(20).reshape(20, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths([[2, 3], [3, 6, 2, 6, 3]]) expect = [ - numpy.array([9, 10, 0, 1, 2], dtype='int32'), - numpy.array([11, 12, 13, 14, 15, 16, 3, 4, 5, 6, 7, 8], - dtype='int32'), - numpy.array([17, 18, 19], dtype='int32') + np.array([9, 10, 0, 1, 2], dtype='int32'), + np.array([11, 12, 13, 14, 15, 16, 3, 4, 5, 6, 7, 8], dtype='int32'), + np.array([17, 18, 19], dtype='int32') ] lod = [[[2, 3]], [[6, 6]], [[3]]] @@ -83,14 +79,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def 
test_lod_tensor_to_array_level_1_empty_seq(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(31).reshape(31, 1).astype('int32'), self.place()) + tensor.set(np.arange(31).reshape(31, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( [[3, 2, 4, 2], [3, 4, 4, 0, 1, 5, 2, 2, 2, 7, 1]]) expect = [ - numpy.array(item, dtype='int32') for item in [[ + np.array(item, dtype='int32') for item in [[ 12, 13, 14, 15, 16, 0, 1, 2, 23, 24, 25, 26, 27, 28, 29 ], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]] ] @@ -103,14 +98,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_lod_tensor_to_array_level_2(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) + tensor.set(np.arange(50).reshape(50, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( [[2, 3, 1], [2, 3, 1, 4, 2, 1], [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) expect = [ - numpy.array(item, dtype='int32') + np.array(item, dtype='int32') for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], list(range(22, 39)) + list(range(7, 21)), list(range(39, 46))] @@ -124,8 +118,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_lod_tensor_to_array_level_2_skip_level(self): tensor = core.LoDTensor() - tensor.set( - numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) + tensor.set(np.arange(50).reshape(50, 1).astype('int32'), self.place()) tensor.set_recursive_sequence_lengths( [[2, 3, 1], [2, 3, 1, 4, 2, 1], [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) @@ -159,20 +152,23 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor) self.assertEqual( - numpy.array(scope.find_var(max_len.name).get_tensor())[0], + np.array(scope.find_var(max_len.name).get_tensor())[0], expect_max_len) def check_array_same(self, array, expect_tensor, expect_lod): self.assertEqual(len(expect_tensor), len(array)) for i, exp in enumerate(zip(expect_tensor, expect_lod)): exp_tensor, exp_lod = exp - exp_tensor = numpy.expand_dims(exp_tensor, axis=1) - self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i]))) + exp_tensor = np.expand_dims(exp_tensor, axis=1) + np.testing.assert_allclose(exp_tensor, + np.array(array[i]), + rtol=1e-05) self.assertEqual(exp_lod, array[i].recursive_sequence_lengths()) def check_tensor_same(self, actual, expect): - self.assertTrue(numpy.allclose(numpy.array(actual), - numpy.array(expect))) + np.testing.assert_allclose(np.array(actual), + np.array(expect), + rtol=1e-05) self.assertEqual(actual.recursive_sequence_lengths(), expect.recursive_sequence_lengths()) @@ -197,19 +193,19 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): append_backward(mean) tensor = core.LoDTensor() - tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place) + tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place) tensor.set_recursive_sequence_lengths([[3, 6, 1]]) g_vars = program.global_block().var(x.name + "@GRAD") exe = Executor(place) g_out = [ - numpy.array(item).sum() for item in exe.run(program, - feed={'x': tensor}, - fetch_list=[g_vars], - return_numpy=False) + np.array(item).sum() for item in exe.run(program, + feed={'x': tensor}, + fetch_list=[g_vars], + return_numpy=False) ] - g_out_sum = numpy.array(g_out).sum() + g_out_sum = np.array(g_out).sum() self.assertAlmostEqual(1.0, g_out_sum, delta=0.1) @@ -218,7 +214,7 @@ class TestLoDTensorArrayError(unittest.TestCase): def test_errors(self): with 
program_guard(Program(), Program()): - x = numpy.random.random((10)).astype("float32") + x = np.random.random((10)).astype("float32") x2 = layers.data(name='x', shape=[10]) table = lod_rank_table(x2, level=0) @@ -227,7 +223,7 @@ class TestLoDTensorArrayError(unittest.TestCase): self.assertRaises(TypeError, test_x_Variable) - table2 = numpy.random.random((2)).astype("int64") + table2 = np.random.random((2)).astype("int64") def test_table_Variable(): rank_table = lod_tensor_to_array(x=x2, table=table2) @@ -251,7 +247,7 @@ class TestArrayLoDTensorError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x = numpy.random.random((10)).astype("float32") + x = np.random.random((10)).astype("float32") x2 = layers.data(name='x', shape=[10]) table = lod_rank_table(x2, level=0) array = lod_tensor_to_array(x2, table) @@ -261,7 +257,7 @@ class TestArrayLoDTensorError(unittest.TestCase): self.assertRaises(TypeError, test_x_Variable) - table2 = numpy.random.random((2)).astype("int64") + table2 = np.random.random((2)).astype("int64") def test_table_Variable(): rank_table = array_to_lod_tensor(x=array, table=table2) diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py index b1b21e0666f..8f1fb2c1da2 100644 --- a/python/paddle/fluid/tests/unittests/test_log_softmax.py +++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py @@ -130,13 +130,13 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): y = logsoftmax(x) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], ref_out)) + np.testing.assert_allclose(out[0], ref_out, rtol=1e-05) # test dygrapg api paddle.disable_static() x = paddle.to_tensor(self.x) y = logsoftmax(x) - self.assertTrue(np.allclose(y.numpy(), ref_out)) + np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-05) paddle.enable_static() def test_check_api(self): @@ -163,12 +163,12 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): y = F.log_softmax(x, axis, dtype) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], ref_out)) + np.testing.assert_allclose(out[0], ref_out, rtol=1e-05) paddle.disable_static() x = paddle.to_tensor(self.x) y = F.log_softmax(x, axis, dtype) - self.assertTrue(np.allclose(y.numpy(), ref_out), True) + np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-05) paddle.enable_static() def test_check_api(self): diff --git a/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py b/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py index ebc350d13c6..f955e4f7935 100644 --- a/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py +++ b/python/paddle/fluid/tests/unittests/test_logcumsumexp_op.py @@ -103,22 +103,22 @@ class TestLogcumsumexp(unittest.TestCase): y = paddle.logcumsumexp(data) z = np_logcumsumexp(data_np) - self.assertTrue(np.allclose(z, y.numpy())) + np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) y = paddle.logcumsumexp(data, axis=0) z = np_logcumsumexp(data_np, axis=0) - self.assertTrue(np.allclose(z, y.numpy())) + np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) y = paddle.logcumsumexp(data, axis=-1) z = np_logcumsumexp(data_np, axis=-1) - self.assertTrue(np.allclose(z, y.numpy())) + np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) y = paddle.logcumsumexp(data, dtype='float32') self.assertTrue(y.dtype == core.VarDesc.VarType.FP32) y = paddle.logcumsumexp(data, 
axis=-2) z = np_logcumsumexp(data_np, axis=-2) - self.assertTrue(np.allclose(z, y.numpy())) + np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) with self.assertRaises(IndexError): y = paddle.logcumsumexp(data, axis=-3) @@ -135,7 +135,7 @@ class TestLogcumsumexp(unittest.TestCase): z = np_logcumsumexp(data_np) # check that our algorithm doesn't overflow self.assertTrue(all(z != np.inf)) - self.assertTrue(np.allclose(z, y.numpy())) + np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) def run_static(self, use_gpu=False): with fluid.program_guard(fluid.Program()): @@ -160,14 +160,14 @@ class TestLogcumsumexp(unittest.TestCase): ]) z = np_logcumsumexp(data_np) - self.assertTrue(np.allclose(z, out[0])) + np.testing.assert_allclose(z, out[0], rtol=1e-05) z = np_logcumsumexp(data_np, axis=0) - self.assertTrue(np.allclose(z, out[1])) + np.testing.assert_allclose(z, out[1], rtol=1e-05) z = np_logcumsumexp(data_np, axis=-1) - self.assertTrue(np.allclose(z, out[2])) + np.testing.assert_allclose(z, out[2], rtol=1e-05) self.assertTrue(out[3].dtype == np.float64) z = np_logcumsumexp(data_np, axis=-2) - self.assertTrue(np.allclose(z, out[4])) + np.testing.assert_allclose(z, out[4], rtol=1e-05) def test_cpu(self): paddle.disable_static(paddle.fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_logit_op.py b/python/paddle/fluid/tests/unittests/test_logit_op.py index 44865936af9..7240fad1a44 100644 --- a/python/paddle/fluid/tests/unittests/test_logit_op.py +++ b/python/paddle/fluid/tests/unittests/test_logit_op.py @@ -93,12 +93,12 @@ class TestLogitAPI(unittest.TestCase): y = paddle.logit(x, eps) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], ref_out)) + np.testing.assert_allclose(out[0], ref_out, rtol=1e-05) # test dygrapg api paddle.disable_static() x = paddle.to_tensor(self.x) y = paddle.logit(x, 1e-8) - self.assertTrue(np.allclose(y.numpy(), ref_out)) + np.testing.assert_allclose(y.numpy(), ref_out, rtol=1e-05) paddle.enable_static() def test_check_api(self): diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py index 3e54147b180..922349cab36 100644 --- a/python/paddle/fluid/tests/unittests/test_logsumexp.py +++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py @@ -149,12 +149,12 @@ class TestLogsumexpAPI(unittest.TestCase): out = paddle.logsumexp(x, axis, keepdim) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) - self.assertTrue(np.allclose(res[0], out_ref)) + np.testing.assert_allclose(res[0], out_ref, rtol=1e-05) paddle.disable_static(self.place) x = paddle.to_tensor(self.x) out = paddle.logsumexp(x, axis, keepdim) - self.assertTrue(np.allclose(out.numpy(), out_ref)) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05) paddle.enable_static() def test_api(self): @@ -173,7 +173,7 @@ class TestLogsumexpAPI(unittest.TestCase): out3 = paddle.tensor.math.logsumexp(x) out_ref = ref_logsumexp(self.x) for out in [out1, out2, out3]: - self.assertTrue(np.allclose(out.numpy(), out_ref)) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py index 1f8f9c62f0b..761c9e13c21 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py @@ -134,8 +134,9 @@ class 
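Like np.allclose, assert_allclose is asymmetric: it checks |actual - desired| <= atol + rtol * |desired|, scaled by the second argument. A short sketch (values illustrative, not from the patch) of why these rewrites keep the argument order of the original calls:

import numpy as np

actual = np.array([100.0005])
desired = np.array([100.0])

# allowed error is 0 + 1e-05 * 100.0 = 1e-03, so this passes
np.testing.assert_allclose(actual, desired, rtol=1e-05)
# swapping the arguments rescales the bound to 1e-05 * 100.0005; harmless
# here, but the difference grows when the two magnitudes diverge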
TestLRNAPI(unittest.TestCase): fetch_list=[out1, out2], return_numpy=True) - self.assertTrue( - np.allclose(results[0], np.transpose(results[1], (0, 3, 1, 2)))) + np.testing.assert_allclose(results[0], + np.transpose(results[1], (0, 3, 1, 2)), + rtol=1e-05) def test_exception(self): input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32") @@ -196,7 +197,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): fetch_list=[res1, res2]) fetches1_tran = np.transpose(fetches[1], (0, 2, 1)) - self.assertTrue(np.allclose(fetches[0], fetches1_tran)) + np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) def check_static_4d_input(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -226,7 +227,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): fetch_list=[res1, res2]) fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2)) - self.assertTrue(np.allclose(fetches[0], fetches1_tran)) + np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) def check_static_5d_input(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -255,7 +256,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): fetch_list=[res1, res2]) fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3)) - self.assertTrue(np.allclose(fetches[0], fetches1_tran)) + np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05) def test_static(self): for place in self.places: @@ -279,7 +280,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): data_format='NLC') res2_tran = np.transpose(res2.numpy(), (0, 2, 1)) - self.assertTrue(np.allclose(res1.numpy(), res2_tran)) + np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def check_dygraph_4d_input(self, place): with fluid.dygraph.guard(place): @@ -297,7 +298,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): data_format='NHWC') res2_tran = np.transpose(res2.numpy(), (0, 3, 1, 2)) - self.assertTrue(np.allclose(res1.numpy(), res2_tran)) + np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def check_dygraph_5d_input(self, place): with fluid.dygraph.guard(place): @@ -315,7 +316,7 @@ class TestLocalResponseNormFAPI(unittest.TestCase): data_format='NDHWC') res2_tran = np.transpose(res2.numpy(), (0, 4, 1, 2, 3)) - self.assertTrue(np.allclose(res1.numpy(), res2_tran)) + np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def test_dygraph(self): for place in self.places: @@ -385,7 +386,7 @@ class TestLocalResponseNormCAPI(unittest.TestCase): res2 = m2(in2) res2_tran = np.transpose(res2.numpy(), (0, 3, 1, 2)) - self.assertTrue(np.allclose(res1.numpy(), res2_tran)) + np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_lu_op.py b/python/paddle/fluid/tests/unittests/test_lu_op.py index 414dc66f841..917f2b1dbfd 100644 --- a/python/paddle/fluid/tests/unittests/test_lu_op.py +++ b/python/paddle/fluid/tests/unittests/test_lu_op.py @@ -205,10 +205,10 @@ class TestLUAPI(unittest.TestCase): mtp = Pmat_to_perm(sP, min(m, n)) nP = perm_to_Pmat(P, sP.shape[-1]) - self.assertTrue(np.allclose(sU, triu, atol=1e-5)) - self.assertTrue(np.allclose(sL, tril, atol=1e-5)) - self.assertTrue(np.allclose(P, mtp, atol=1e-5)) - self.assertTrue(np.allclose(nP, sP, atol=1e-5)) + np.testing.assert_allclose(sU, triu, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(sL, tril, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(P, mtp, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(nP, sP, 
rtol=1e-05, atol=1e-05) tensor_shapes = [ (3, 5), @@ -271,7 +271,10 @@ class TestLUAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": a}, fetch_list=[lu, p]) - self.assertTrue(np.allclose(fetches[0], NLU, atol=1e-5)) + np.testing.assert_allclose(fetches[0], + NLU, + rtol=1e-05, + atol=1e-05) tensor_shapes = [ (3, 5), diff --git a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py index 246587fba71..89f406b0a2f 100644 --- a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py +++ b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py @@ -218,9 +218,9 @@ class TestLU_UnpackAPI(unittest.TestCase): LU, P = paddle.linalg.lu(x) pP, pL, pU = paddle.linalg.lu_unpack(LU, P) - self.assertTrue(np.allclose(sU, pU, atol=1e-5)) - self.assertTrue(np.allclose(sL, pL, atol=1e-5)) - self.assertTrue(np.allclose(sP, pP, atol=1e-5)) + np.testing.assert_allclose(sU, pU, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(sL, pL, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(sP, pP, rtol=1e-05, atol=1e-05) tensor_shapes = [ (3, 5), @@ -266,9 +266,18 @@ class TestLU_UnpackAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": a}, fetch_list=[pP, pL, pU]) - self.assertTrue(np.allclose(fetches[0], sP, atol=1e-5)) - self.assertTrue(np.allclose(fetches[1], sL, atol=1e-5)) - self.assertTrue(np.allclose(fetches[2], sU, atol=1e-5)) + np.testing.assert_allclose(fetches[0], + sP, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(fetches[1], + sL, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(fetches[2], + sU, + rtol=1e-05, + atol=1e-05) tensor_shapes = [ (3, 5), diff --git a/python/paddle/fluid/tests/unittests/test_manual_seed.py b/python/paddle/fluid/tests/unittests/test_manual_seed.py index e42487df79a..20a91fc4be8 100644 --- a/python/paddle/fluid/tests/unittests/test_manual_seed.py +++ b/python/paddle/fluid/tests/unittests/test_manual_seed.py @@ -41,8 +41,8 @@ class TestManualSeed(unittest.TestCase): x3_np = x3.numpy() if not fluid.core.is_compiled_with_cuda(): - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py index fd768b1516f..a4424a6311a 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py @@ -78,7 +78,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): b = fluid.dygraph.to_variable(b_np) res = a / b #NOTE: Not sure why array_equal fails on windows, allclose is acceptable - self.assertTrue(np.allclose(res.numpy(), a_np / b_np)) + np.testing.assert_allclose(res.numpy(), a_np / b_np, rtol=1e-05) def test_div(self): with _test_eager_guard(): @@ -157,7 +157,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = 0.1 res = a / b - self.assertTrue(np.allclose(res.numpy(), a_np / b)) + np.testing.assert_allclose(res.numpy(), a_np / b, rtol=1e-05) def test_div_scalar(self): with _test_eager_guard(): @@ -172,7 +172,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase): a = fluid.dygraph.to_variable(a_np) b = fluid.dygraph.to_variable(b_np) res = a**b - self.assertTrue(np.allclose(res.numpy(), 
a_np**b_np)) + np.testing.assert_allclose(res.numpy(), a_np**b_np, rtol=1e-05) def test_pow(self): with _test_eager_guard(): @@ -388,11 +388,10 @@ class TestMathOpPatchesVarBase(unittest.TestCase): x = fluid.layers.ones((2, 2), dtype="float32") y = t * x - self.assertTrue( - np.allclose(y.numpy(), - t * np.ones((2, 2), dtype="float32"), - rtol=1e-05, - atol=0.0)) + np.testing.assert_allclose(y.numpy(), + t * np.ones((2, 2), dtype='float32'), + rtol=1e-05, + atol=0.0) def test_np_left_mul(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index 4b4a4c7e15f..5efd4090bff 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -308,9 +308,13 @@ class API_TestMm(unittest.TestCase): expected_result = np.matmul(data1.reshape(1, 2), data2.reshape(2, 1)) - self.assertTrue( - np.allclose(np_res, expected_result, atol=1e-5), "two value is\ - {}\n{}, check diff!".format(np_res, expected_result)) + np.testing.assert_allclose( + np_res, + expected_result, + rtol=1e-05, + atol=1e-05, + err_msg='two value is {}\n{}, check diff!'.format( + np_res, expected_result)) def test_dygraph_without_out(self): device = fluid.CPUPlace() @@ -321,7 +325,7 @@ class API_TestMm(unittest.TestCase): data2 = fluid.dygraph.to_variable(input_array2) out = paddle.mm(data1, data2) expected_result = np.matmul(input_array1, input_array2) - self.assertTrue(np.allclose(expected_result, out.numpy())) + np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05) class Test_API_Matmul(unittest.TestCase): @@ -335,7 +339,7 @@ class Test_API_Matmul(unittest.TestCase): data2 = fluid.dygraph.to_variable(input_array2) out = paddle.matmul(data1, data2) expected_result = np.matmul(input_array1, input_array2) - self.assertTrue(np.allclose(expected_result, out.numpy())) + np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05) class API_TestMmError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py index 1eb1f42671b..92ce97d8821 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py @@ -285,8 +285,9 @@ class TestMatrixPowerAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input_x": input_np}, fetch_list=[result]) - self.assertTrue( - np.allclose(fetches[0], np.linalg.matrix_power(input_np, -2))) + np.testing.assert_allclose(fetches[0], + np.linalg.matrix_power(input_np, -2), + rtol=1e-05) def test_static(self): for place in self.places: @@ -298,9 +299,9 @@ class TestMatrixPowerAPI(unittest.TestCase): input_np = np.random.random([4, 4]).astype("float64") input = paddle.to_tensor(input_np) result = paddle.linalg.matrix_power(input, -2) - self.assertTrue( - np.allclose(result.numpy(), - np.linalg.matrix_power(input_np, -2))) + np.testing.assert_allclose(result.numpy(), + np.linalg.matrix_power(input_np, -2), + rtol=1e-05) class TestMatrixPowerAPIError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py index b0b04a3cc10..69531ca1e38 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py @@ -154,7 +154,7 @@ class TestMatrixRankAPI(unittest.TestCase): x_pd = 
paddle.to_tensor(x_np) rank_np = np.linalg.matrix_rank(x_np, hermitian=True) rank_pd = paddle.linalg.matrix_rank(x_pd, hermitian=True) - self.assertTrue(np.allclose(rank_np, rank_pd)) + np.testing.assert_allclose(rank_np, rank_pd, rtol=1e-05) x_np = np.random.rand(3, 4, 7, 8).astype(np.float64) tol_np = np.random.random([3, 4]).astype(np.float32) @@ -162,14 +162,14 @@ class TestMatrixRankAPI(unittest.TestCase): tol_pd = paddle.to_tensor(tol_np) rank_np = np.linalg.matrix_rank(x_np, tol_np, hermitian=False) rank_pd = paddle.linalg.matrix_rank(x_pd, tol_pd, hermitian=False) - self.assertTrue(np.allclose(rank_np, rank_pd)) + np.testing.assert_allclose(rank_np, rank_pd, rtol=1e-05) x_np = np.random.rand(3, 4, 7, 8).astype(np.float64) x_pd = paddle.to_tensor(x_np) tol = 0.1 rank_np = np.linalg.matrix_rank(x_np, tol, hermitian=False) rank_pd = paddle.linalg.matrix_rank(x_pd, tol, hermitian=False) - self.assertTrue(np.allclose(rank_np, rank_pd)) + np.testing.assert_allclose(rank_np, rank_pd, rtol=1e-05) def test_static(self): paddle.enable_static() @@ -198,7 +198,7 @@ class TestMatrixRankAPI(unittest.TestCase): "TolTensor": tol_np }, fetch_list=[rank_pd]) - self.assertTrue(np.allclose(fetches[0], rank_np)) + np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -212,7 +212,7 @@ class TestMatrixRankAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"X": x_np}, fetch_list=[rank_pd]) - self.assertTrue(np.allclose(fetches[0], rank_np)) + np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -226,7 +226,7 @@ class TestMatrixRankAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"X": x_np}, fetch_list=[rank_pd]) - self.assertTrue(np.allclose(fetches[0], rank_np)) + np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py index 608fad131f5..29f1c0bfddb 100644 --- a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py @@ -123,8 +123,10 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): grad_tensor = paddle.ones_like(x) paddle.autograd.backward([out], [grad_tensor], True) - self.assertEqual(np.allclose(self.np_out[func], out.numpy()), True) - self.assertEqual(np.allclose(self.np_grad[func], x.grad), True) + np.testing.assert_allclose(self.np_out[func], + out.numpy(), + rtol=1e-05) + np.testing.assert_allclose(self.np_grad[func], x.grad, rtol=1e-05) paddle.enable_static() _test_dygraph('amax') diff --git a/python/paddle/fluid/tests/unittests/test_maximum_op.py b/python/paddle/fluid/tests/unittests/test_maximum_op.py index 9568a145ed4..a623946411b 100644 --- a/python/paddle/fluid/tests/unittests/test_maximum_op.py +++ b/python/paddle/fluid/tests/unittests/test_maximum_op.py @@ -53,7 +53,7 @@ class ApiMaximumTest(unittest.TestCase): "y": self.input_y }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -66,7 +66,7 @@ class ApiMaximumTest(unittest.TestCase): "z": self.input_z }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, 
self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -79,7 +79,7 @@ class ApiMaximumTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -92,7 +92,7 @@ class ApiMaximumTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): paddle.disable_static() @@ -106,17 +106,17 @@ class ApiMaximumTest(unittest.TestCase): res = paddle.maximum(x, y) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) # test broadcast res = paddle.maximum(x, z) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.maximum(a, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.maximum(b, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py index 64803bf39fe..5e28d52f93f 100644 --- a/python/paddle/fluid/tests/unittests/test_maxout_op.py +++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py @@ -107,7 +107,7 @@ class TestMaxoutAPI(unittest.TestCase): res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis) for r in res: - self.assertTrue(np.allclose(out_ref, r)) + np.testing.assert_allclose(out_ref, r, rtol=1e-05) def test_dygraph_api(self): paddle.disable_static(self.place) @@ -117,11 +117,11 @@ class TestMaxoutAPI(unittest.TestCase): out2 = m(x) out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis) for r in [out1, out2]: - self.assertTrue(np.allclose(out_ref, r.numpy())) + np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) out3 = F.maxout(x, self.groups, -1) out3_ref = maxout_forward_naive(self.x_np, self.groups, -1) - self.assertTrue(np.allclose(out3_ref, out3.numpy())) + np.testing.assert_allclose(out3_ref, out3.numpy(), rtol=1e-05) paddle.enable_static() def test_fluid_api(self): @@ -131,12 +131,12 @@ class TestMaxoutAPI(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = maxout_forward_naive(self.x_np, self.groups, self.axis) - self.assertTrue(np.allclose(out_ref, res[0])) + np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) paddle.disable_static(self.place) x = paddle.to_tensor(self.x_np) out = paddle.fluid.layers.maxout(x, groups=self.groups, axis=self.axis) - self.assertTrue(np.allclose(out_ref, out.numpy())) + np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) paddle.enable_static() def test_errors(self): diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index a3608b5aa5e..965d70a5950 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -353,7 +353,7 @@ class TestMeanAPI(unittest.TestCase): fetch_list=[out1, out2, out3, out4, out5]) out_ref = np.mean(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-04), True) + np.testing.assert_allclose(out, out_ref, rtol=0.0001) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -366,8 +366,7 @@ class TestMeanAPI(unittest.TestCase): if len(axis) == 0: axis = None out_ref = np.mean(x, axis, keepdims=keepdim) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-04), - True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.0001) test_case(self.x) test_case(self.x, []) @@ -387,13 +386,15 @@ class TestMeanAPI(unittest.TestCase): exe = fluid.Executor(place) x_np = np.random.rand(10, 10).astype(np.float32) res = exe.run(feed={"x": x_np}, fetch_list=[out]) - self.assertEqual(np.allclose(res[0], np.mean(x_np, axis=1)), True) + np.testing.assert_allclose(res[0], np.mean(x_np, axis=1), rtol=1e-05) with fluid.dygraph.guard(): x_np = np.random.rand(10, 10).astype(np.float32) x = fluid.dygraph.to_variable(x_np) out = fluid.layers.reduce_mean(input=x, dim=1) - self.assertEqual(np.allclose(out.numpy(), np.mean(x_np, axis=1)), True) + np.testing.assert_allclose(out.numpy(), + np.mean(x_np, axis=1), + rtol=1e-05) def test_errors(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_memcpy_op.py b/python/paddle/fluid/tests/unittests/test_memcpy_op.py index 7a925b10036..f2510e5563c 100755 --- a/python/paddle/fluid/tests/unittests/test_memcpy_op.py +++ b/python/paddle/fluid/tests/unittests/test_memcpy_op.py @@ -74,8 +74,8 @@ class TestMemcpy_FillConstant(unittest.TestCase): gpu_, pinned_ = exe.run(main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name]) - self.assertTrue(np.allclose(gpu_, pinned_)) - self.assertTrue(np.allclose(pinned_, np.ones((10, 10)))) + np.testing.assert_allclose(gpu_, pinned_, rtol=1e-05) + np.testing.assert_allclose(pinned_, np.ones((10, 10)), rtol=1e-05) def test_pinned_copy_gpu(self): main_program, gpu_var, pinned_var = self.get_prog() @@ -88,8 +88,8 @@ class TestMemcpy_FillConstant(unittest.TestCase): gpu_, pinned_ = exe.run(main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name]) - self.assertTrue(np.allclose(gpu_, pinned_)) - self.assertTrue(np.allclose(gpu_, np.zeros((10, 10)))) + np.testing.assert_allclose(gpu_, pinned_, rtol=1e-05) + np.testing.assert_allclose(gpu_, np.zeros((10, 10)), rtol=1e-05) def test_hip_copy_bool_value(self): if core.is_compiled_with_rocm(): diff --git a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py index 1038d0db4f6..84f3cc7efb6 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_adam_op.py @@ -137,8 +137,10 @@ class TestMergedAdam(unittest.TestCase): if place == 'gpu': np.testing.assert_array_equal(value1[i], value2[i]) else: - self.assertTrue(np.allclose(value1[i], value2[i], - atol=1e-7)) + np.testing.assert_allclose(value1[i], + value2[i], + rtol=1e-05, + atol=1e-07) def get_places(self): places = ['cpu'] diff --git a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py index 957b9e45e0c..466ee3afd58 100644 --- a/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_merged_momentum_op.py @@ -316,7 +316,7 @@ 
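The merged-optimizer tests below keep np.testing.assert_array_equal on the exact GPU path and fall back to assert_allclose with a small atol on CPU. A brief sketch of the difference between the two checks (illustrative values, not from the patch):

import numpy as np

a = np.array([0.1 + 0.2])
b = np.array([0.3])

np.testing.assert_allclose(a, b, rtol=1e-05, atol=1e-07)  # passes
# np.testing.assert_array_equal(a, b) raises, because
# 0.30000000000000004 and 0.3 are not bit-for-bit identical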
class TestMergedMomentum(unittest.TestCase): if isinstance(place, paddle.CUDAPlace): np.testing.assert_array_equal(out1, out2) else: - self.assertTrue(np.allclose(out1, out2, atol=1e-7)) + np.testing.assert_allclose(out1, out2, rtol=1e-05, atol=1e-07) def get_places(self): places = [paddle.CPUPlace()] @@ -380,7 +380,7 @@ class TestMergedMomentum2(unittest.TestCase): if isinstance(place, paddle.CUDAPlace): np.testing.assert_array_equal(out1, out2) else: - self.assertTrue(np.allclose(out1, out2, atol=1e-7)) + np.testing.assert_allclose(out1, out2, rtol=1e-05, atol=1e-07) outs3 = run_op(use_nesterov=False, use_merged=True) outs4 = run_op(use_nesterov=False, use_merged=False) @@ -389,7 +389,7 @@ class TestMergedMomentum2(unittest.TestCase): if isinstance(place, paddle.CUDAPlace): np.testing.assert_array_equal(out3, out4) else: - self.assertTrue(np.allclose(out3, out4, atol=1e-7)) + np.testing.assert_allclose(out3, out4, rtol=1e-05, atol=1e-07) def get_places(self): places = [paddle.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_minimum_op.py b/python/paddle/fluid/tests/unittests/test_minimum_op.py index 2da5df85826..782083affc1 100644 --- a/python/paddle/fluid/tests/unittests/test_minimum_op.py +++ b/python/paddle/fluid/tests/unittests/test_minimum_op.py @@ -53,7 +53,7 @@ class ApiMinimumTest(unittest.TestCase): "y": self.input_y }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -66,7 +66,7 @@ class ApiMinimumTest(unittest.TestCase): "z": self.input_z }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -79,7 +79,7 @@ class ApiMinimumTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -92,7 +92,7 @@ class ApiMinimumTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): paddle.disable_static() @@ -106,17 +106,17 @@ class ApiMinimumTest(unittest.TestCase): res = paddle.minimum(x, y) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) # test broadcast res = paddle.minimum(x, z) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.minimum(a, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.minimum(b, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_mode_op.py b/python/paddle/fluid/tests/unittests/test_mode_op.py index ebb95dfea39..faa6836e40b 100644 --- a/python/paddle/fluid/tests/unittests/test_mode_op.py +++ b/python/paddle/fluid/tests/unittests/test_mode_op.py @@ -123,13 +123,13 @@ class 
TestModeOpKernels(unittest.TestCase): for axis in self.axises: value_expect, indice_expect = cal_mode(self.inputs, axis) v, inds = paddle.mode(tensor, axis) - self.assertTrue(np.allclose(v.numpy(), value_expect)) + np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) value_expect, indice_expect = cal_mode(self.inputs, axis, keepdim=True) v, inds = paddle.mode(tensor, axis, keepdim=True) - self.assertTrue(np.allclose(v.numpy(), value_expect)) + np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) def test_gpu_kernel(): paddle.set_device('gpu') @@ -137,13 +137,13 @@ class TestModeOpKernels(unittest.TestCase): for axis in self.axises: value_expect, indice_expect = cal_mode(self.inputs, axis) v, inds = paddle.mode(tensor, axis) - self.assertTrue(np.allclose(v.numpy(), value_expect)) + np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) value_expect, indice_expect = cal_mode(self.inputs, axis, keepdim=True) v, inds = paddle.mode(tensor, axis, keepdim=True) - self.assertTrue(np.allclose(v.numpy(), value_expect)) + np.testing.assert_allclose(v.numpy(), value_expect, rtol=1e-05) paddle.disable_static() test_cpu_kernel() @@ -182,7 +182,7 @@ class TestModeOpInStatic(unittest.TestCase): exe = paddle.static.Executor(paddle.CPUPlace()) paddle_result = exe.run(feed={"x": self.input_data}, fetch_list=[result])[0] - self.assertTrue(np.allclose(paddle_result, expect_value)) + np.testing.assert_allclose(paddle_result, expect_value, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index 949bb2fb325..7d037798588 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -904,10 +904,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): output2, params2 = self._momentum_optimize_dygraph( place=place, use_amp=use_amp, use_multi_tensor=False) - self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True) + np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): - self.assertEqual( - np.allclose(params1[idx], params2[idx], rtol=1e-05), True) + np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) def _check_with_param_arrt(self, place, use_amp): output1, params1 = self._momentum_optimize_dygraph( @@ -920,10 +919,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): use_amp=use_amp, use_param_attr=True, use_multi_tensor=False) - self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True) + np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): - self.assertEqual( - np.allclose(params1[idx], params2[idx], rtol=1e-05), True) + np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) def _check_with_param_group(self, place, use_amp): output1, params1 = self._momentum_optimize_dygraph( @@ -936,10 +934,9 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): use_amp=use_amp, use_param_group=True, use_multi_tensor=False) - self.assertEqual(np.allclose(output1, output2, rtol=1e-05), True) + np.testing.assert_allclose(output1, output2, rtol=1e-05) for idx in range(len(params1)): - self.assertEqual( - np.allclose(params1[idx], params2[idx], rtol=1e-05), True) + np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05) def test_main(self): for place in self._get_places(): @@ -1017,8 +1014,7 @@ class TestMultiTensorMomentumStatic(unittest.TestCase): use_amp=use_amp, 
use_multi_tensor=False) for idx in range(len(output1)): - self.assertEqual( - np.allclose(output1[idx], output2[idx], rtol=1e-05), True) + np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05) def test_main(self): for place in self._get_places(): diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py index d3dd0d27745..71e7fea3b77 100644 --- a/python/paddle/fluid/tests/unittests/test_mse_loss.py +++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py @@ -47,7 +47,7 @@ class TestMseLoss(unittest.TestCase): }, fetch_list=[output]) - self.assertTrue(np.isclose(np_result, result).all()) + np.testing.assert_allclose(np_result, result, rtol=1e-05) class TestMseInvalidInput(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index a53c277ad02..e076cc92778 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -497,7 +497,7 @@ class TestIOU(unittest.TestCase): expt_output = np.array([2.0 / 16.0]).astype('float32') calc_output = np.array([iou(box1, box2, True)]).astype('float32') - self.assertTrue(np.allclose(calc_output, expt_output)) + np.testing.assert_allclose(calc_output, expt_output, rtol=1e-05) class TestMulticlassNMS2Op(TestMulticlassNMSOp): diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py index 2233189285a..163f9a1a072 100644 --- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py +++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py @@ -67,9 +67,12 @@ class TestMultinomialOp(OpTest): # normalize the input to get the probability prob = self.input_np / self.input_np.sum(axis=-1, keepdims=True) sample_prob = self.sample_output(np.array(outs[0])) - self.assertTrue( - np.allclose(sample_prob, prob, rtol=0, atol=0.01), - "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob)) + np.testing.assert_allclose(sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + + '\nprob: ' + str(prob)) class TestMultinomialOp2(TestMultinomialOp): @@ -112,9 +115,12 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_one_dimension(out.numpy(), 4) prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True) - self.assertTrue( - np.allclose(sample_prob, prob, rtol=0, atol=0.01), - "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob)) + np.testing.assert_allclose(sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + + '\nprob: ' + str(prob)) def test_dygraph2(self): # input probability is a matrix, and replacement is True @@ -125,9 +131,12 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_two_dimension(out.numpy(), [3, 4]) prob = x_numpy / x_numpy.sum(axis=-1, keepdims=True) - self.assertTrue( - np.allclose(sample_prob, prob, rtol=0, atol=0.01), - "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob)) + np.testing.assert_allclose(sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + + '\nprob: ' + str(prob)) paddle.enable_static() def test_dygraph3(self): @@ -170,9 +179,12 @@ class TestMultinomialApi(unittest.TestCase): sample_prob = sample_output_one_dimension(out, 4) prob = x_np / x_np.sum(axis=-1, keepdims=True) - self.assertTrue( - np.allclose(sample_prob, prob, rtol=0, 
atol=0.01), - "sample_prob: " + str(sample_prob) + "\nprob: " + str(prob)) + np.testing.assert_allclose(sample_prob, + prob, + rtol=0, + atol=0.01, + err_msg='sample_prob: ' + str(sample_prob) + + '\nprob: ' + str(prob)) class TestMultinomialAlias(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index cfc56d5a959..e0b96bc83ac 100755 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -62,49 +62,49 @@ class TestMultiplyApi(unittest.TestCase): x_data = np.random.rand(200) y_data = np.random.rand(200) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test static computation graph: 2-d array x_data = np.random.rand(2, 500) y_data = np.random.rand(2, 500) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test static computation graph: broadcast x_data = np.random.rand(2, 500) y_data = np.random.rand(500) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test static computation graph: boolean x_data = np.random.choice([True, False], size=[200]) y_data = np.random.choice([True, False], size=[200]) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 1-d array x_data = np.random.rand(200) y_data = np.random.rand(200) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 2-d array x_data = np.random.rand(20, 50) y_data = np.random.rand(20, 50) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test dynamic computation graph: broadcast x_data = np.random.rand(2, 500) y_data = np.random.rand(500) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) # test dynamic computation graph: boolean x_data = np.random.choice([True, False], size=[200]) y_data = np.random.choice([True, False], size=[200]) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.multiply(x_data, y_data))) + np.testing.assert_allclose(res, np.multiply(x_data, y_data), rtol=1e-05) def test_multiply(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_mv_op.py b/python/paddle/fluid/tests/unittests/test_mv_op.py index 086ed5e693b..763193ec307 100644 --- a/python/paddle/fluid/tests/unittests/test_mv_op.py +++ b/python/paddle/fluid/tests/unittests/test_mv_op.py @@ -56,7 +56,7 @@ class TestMVAPI(unittest.TestCase): z = paddle.mv(self.x, self.vec) np_z = z.numpy() z_expected = np.array(np.dot(self.x_data, self.vec_data)) - self.assertTrue(np.allclose(np_z, z_expected)) + 
np.testing.assert_allclose(np_z, z_expected, rtol=1e-05) paddle.enable_static() @@ -93,7 +93,7 @@ class TestMVAPI(unittest.TestCase): }, fetch_list=[result_vec]) z_expected = np.array(np.dot(self.input_x, self.input_vec)) - self.assertTrue(np.allclose(res, z_expected)) + np.testing.assert_allclose(res, z_expected, rtol=1e-05) class TestMVError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_nanmean_api.py b/python/paddle/fluid/tests/unittests/test_nanmean_api.py index 7f6306f0ae5..3cb3d77b71b 100644 --- a/python/paddle/fluid/tests/unittests/test_nanmean_api.py +++ b/python/paddle/fluid/tests/unittests/test_nanmean_api.py @@ -51,7 +51,7 @@ class TestNanmeanAPI(unittest.TestCase): fetch_list=[out1, out2, out3, out4, out5]) out_ref = np.nanmean(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-04), True) + np.testing.assert_allclose(out, out_ref, rtol=0.0001) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -70,10 +70,9 @@ class TestNanmeanAPI(unittest.TestCase): out_ref[nan_mask] = 0 out_np = out.numpy() out_np[nan_mask] = 0 - self.assertEqual(np.allclose(out_np, out_ref, rtol=1e-04), True) + np.testing.assert_allclose(out_np, out_ref, rtol=0.0001) else: - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-04), - True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.0001) test_case(self.x) test_case(self.x, []) @@ -111,7 +110,7 @@ class TestNanmeanAPI(unittest.TestCase): if (cnt == 0).sum(): dx[np.isnan(dx)] = 0 sum_dx = dx.sum() - self.assertEqual(np.allclose(sum_dx, sum_dx_ref, rtol=1e-04), True) + np.testing.assert_allclose(sum_dx, sum_dx_ref, rtol=0.0001) test_case(self.x) test_case(self.x, []) diff --git a/python/paddle/fluid/tests/unittests/test_nanmedian.py b/python/paddle/fluid/tests/unittests/test_nanmedian.py index 74c0c635dd3..2a5e94c824a 100644 --- a/python/paddle/fluid/tests/unittests/test_nanmedian.py +++ b/python/paddle/fluid/tests/unittests/test_nanmedian.py @@ -85,7 +85,7 @@ class TestNanmedian(unittest.TestCase): fetch_list=[out1, out2, out3, out4, out5]) for out in res: - self.assertTrue(np.allclose(np_res, out, equal_nan=True)) + np.testing.assert_allclose(np_res, out, rtol=1e-05, equal_nan=True) def test_api_dygraph(self): paddle.disable_static(self.place) @@ -113,8 +113,10 @@ class TestNanmedian(unittest.TestCase): np_res = np.nanmedian(data, keepdims=keep_dim) pd_res = paddle.nanmedian(paddle.to_tensor(data), keepdim=keep_dim) - self.assertTrue( - np.allclose(np_res, pd_res.numpy(), equal_nan=True)) + np.testing.assert_allclose(np_res, + pd_res.numpy(), + rtol=1e-05, + equal_nan=True) def test_axis_case(data, axis): pd_res = paddle.nanmedian(paddle.to_tensor(data), @@ -122,7 +124,10 @@ class TestNanmedian(unittest.TestCase): keepdim=False) axis = clean_axis_numpy(axis, len(data.shape)) np_res = np.nanmedian(data, axis=axis, keepdims=False) - self.assertTrue(np.allclose(np_res, pd_res.numpy(), equal_nan=True)) + np.testing.assert_allclose(np_res, + pd_res.numpy(), + rtol=1e-05, + equal_nan=True) for name, data in self.fake_data.items(): test_data_case(data) @@ -162,7 +167,7 @@ class TestNanmedian(unittest.TestCase): data = self.fake_data["col_nan_odd"] out = paddle.nanmedian(paddle.to_tensor(data), keepdim=True) np_res = np.nanmedian(data, keepdims=True) - self.assertTrue(np.allclose(np_res, out, equal_nan=True)) + np.testing.assert_allclose(np_res, out, rtol=1e-05, equal_nan=True) paddle.enable_static() def test_check_grad(self): @@ -192,7 +197,7 @@ class 
TestNanmedian(unittest.TestCase): x_tensor = paddle.to_tensor(x_np, stop_gradient=False) y = paddle.nanmedian(x_tensor, axis=1, keepdim=True) dx = paddle.grad(y, x_tensor)[0].numpy() - self.assertTrue(np.allclose(np_grad, dx, equal_nan=True)) + np.testing.assert_allclose(np_grad, dx, rtol=1e-05, equal_nan=True) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py index 3bcafe53cb8..8bf893b1405 100755 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py @@ -514,10 +514,11 @@ class TestNearestAPI(unittest.TestCase): out_h=12, out_w=12, align_corners=True) - self.assertTrue( - np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1)))) + np.testing.assert_allclose(results[0], + np.transpose(expect_res, (0, 2, 3, 1)), + rtol=1e-05) for i in range(len(results) - 1): - self.assertTrue(np.allclose(results[i + 1], expect_res)) + np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestNearestInterpException(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py index 44584c22825..07e913a0a2b 100755 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_v2_op.py @@ -22,7 +22,7 @@ import paddle.fluid as fluid import paddle.nn as nn import paddle from paddle.nn.functional import interpolate -from paddle._C_ops import final_state_nearest_interp +from paddle import _C_ops paddle.enable_static() @@ -50,9 +50,10 @@ def nearest_interp_test(x, if not isinstance(SizeTensor, list) and not isinstance( SizeTensor, tuple): SizeTensor = [SizeTensor] - return final_state_nearest_interp(x, OutSize, SizeTensor, Scale, - data_layout, out_d, out_h, out_w, scale, - interp_method, align_corners, align_mode) + return _C_ops.final_state_nearest_interp(x, OutSize, SizeTensor, Scale, + data_layout, out_d, out_h, out_w, + scale, interp_method, + align_corners, align_mode) def nearest_neighbor_interp_np(X, @@ -710,10 +711,11 @@ class TestNearestAPI(unittest.TestCase): out_h=12, out_w=12, align_corners=True) - self.assertTrue( - np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1)))) + np.testing.assert_allclose(results[0], + np.transpose(expect_res, (0, 2, 3, 1)), + rtol=1e-05) for i in range(len(results) - 1): - self.assertTrue(np.allclose(results[i + 1], expect_res)) + np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestNearestInterpOpAPI_dy(unittest.TestCase): @@ -737,7 +739,7 @@ class TestNearestInterpOpAPI_dy(unittest.TestCase): scale_factor=scale, mode="nearest", align_corners=False) - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestNearestInterp3DOpAPI_dy(unittest.TestCase): @@ -763,7 +765,7 @@ class TestNearestInterp3DOpAPI_dy(unittest.TestCase): mode="nearest", align_corners=False, data_format="NCDHW") - self.assertTrue(np.allclose(out.numpy(), expect_res)) + np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-05) class TestNearestInterpException(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_neg_op.py b/python/paddle/fluid/tests/unittests/test_neg_op.py index 473d2b77bae..77c961408a5 100644 --- a/python/paddle/fluid/tests/unittests/test_neg_op.py +++ 
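# The nanmedian/nanmean conversions above keep equal_nan=True wherever the
# original np.allclose call used it. Note the default flip: np.allclose
# defaults to equal_nan=False, while np.testing.assert_allclose defaults
# to equal_nan=True, so passing it explicitly mainly documents intent.
# A small self-contained check:
import numpy as np
a = np.array([np.nan, 1.0, 2.0])
np.testing.assert_allclose(a, a.copy(), rtol=1e-05, equal_nan=True)
assert not np.allclose(a, a.copy())  # NaN != NaN under allclose defaults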
b/python/paddle/fluid/tests/unittests/test_neg_op.py @@ -30,7 +30,9 @@ class TestNegOp(unittest.TestCase): input = paddle.to_tensor(self.input) dy_result = paddle.neg(input) expected_result = np.negative(self.input) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) def run_static(self, use_gpu=False): input = paddle.fluid.data(name='input', shape=[32, 8], dtype=self.dtype) @@ -41,7 +43,7 @@ class TestNegOp(unittest.TestCase): exe.run(paddle.static.default_startup_program()) st_result = exe.run(feed={"input": self.input}, fetch_list=[result]) expected_result = np.negative(self.input) - self.assertTrue(np.allclose(st_result[0], expected_result)) + np.testing.assert_allclose(st_result[0], expected_result, rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py b/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py index 8dfaca25e27..ef8faad4e5b 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nn_dice_loss.py @@ -38,7 +38,7 @@ class TestDiceLossValue(unittest.TestCase): union_np = input_np.sum(-1) + label_np.sum(-1) dice_np = np.mean(1 - 2 * intersection_np / (union_np + eps)) dice_paddle = nn.dice_loss(input_, label_, eps) - self.assertTrue(np.isclose(dice_np, dice_paddle.numpy()).all()) + np.testing.assert_allclose(dice_np, dice_paddle.numpy(), rtol=1e-05) class TestDiceLossInvalidInput(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py index 2fb1c92330b..42aecde3235 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py @@ -73,7 +73,7 @@ def create_test_case(margin, reduction): "label": self.label_data }, fetch_list=[result]) - self.assertTrue(np.allclose(result_numpy, expected)) + np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) def run_static_api(self, place): paddle.enable_static() @@ -102,7 +102,7 @@ def create_test_case(margin, reduction): "label": self.label_data }, fetch_list=[result]) - self.assertTrue(np.allclose(result_numpy, expected)) + np.testing.assert_allclose(result_numpy, expected, rtol=1e-05) self.assertTrue('loss' in result.name) def run_dynamic_functional_api(self, place): @@ -118,7 +118,7 @@ def create_test_case(margin, reduction): self.label_data, margin=margin, reduction=reduction) - self.assertTrue(np.allclose(result.numpy(), expected)) + np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def run_dynamic_api(self, place): paddle.disable_static(place) @@ -133,7 +133,7 @@ def create_test_case(margin, reduction): self.label_data, margin=margin, reduction=reduction) - self.assertTrue(np.allclose(result.numpy(), expected)) + np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def run_dynamic_broadcast_api(self, place): paddle.disable_static(place) @@ -149,7 +149,7 @@ def create_test_case(margin, reduction): label_data, margin=margin, reduction=reduction) - self.assertTrue(np.allclose(result.numpy(), expected)) + np.testing.assert_allclose(result.numpy(), expected, rtol=1e-05) def test_case(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py 
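# A few hunks (e.g. the dice-loss test above) start from
# self.assertTrue(np.isclose(x, y).all()) rather than np.allclose.
# np.allclose(x, y) is defined as np.isclose(x, y).all(), so both forms
# map to the same assert_allclose call. Illustrative values only:
import numpy as np
x = np.array([1.0, 2.0])
y = x + 1e-09
assert np.isclose(x, y).all() == np.allclose(x, y)
np.testing.assert_allclose(x, y, rtol=1e-05)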
b/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py index 86dc43bacf8..1b648662d4a 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py +++ b/python/paddle/fluid/tests/unittests/test_nn_quant_functional_layers.py @@ -32,7 +32,7 @@ class TestFunctionalLayers(unittest.TestCase): self.y = paddle.to_tensor(np.random.random(shape)) def check(self, x, y): - self.assertTrue(np.allclose(x.numpy(), y.numpy())) + np.testing.assert_allclose(x.numpy(), y.numpy(), rtol=1e-05) def test_quant_add(self): out_1 = paddle.add(self.x, self.y) diff --git a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py index c43fcc51a81..b8bdea9faf6 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py @@ -53,7 +53,7 @@ class TestNNSigmoidAPI(unittest.TestCase): fluid.backward.append_backward(paddle.mean(y)) exe = paddle.static.Executor(place) out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], self.y)) + np.testing.assert_allclose(out[0], self.y, rtol=1e-05) self.assertTrue(y.name.startswith("api_sigmoid")) def check_dynamic_api(self, place): @@ -61,7 +61,7 @@ class TestNNSigmoidAPI(unittest.TestCase): x = paddle.to_tensor(self.x) mysigmoid = nn.Sigmoid() y = mysigmoid(x) - self.assertTrue(np.allclose(y.numpy(), self.y)) + np.testing.assert_allclose(y.numpy(), self.y, rtol=1e-05) def test_check_api(self): places = [fluid.CPUPlace()] @@ -93,13 +93,13 @@ class TestNNFunctionalSigmoidAPI(unittest.TestCase): y = functional.sigmoid(x, name="api_sigmoid") exe = paddle.static.Executor(fluid.CPUPlace()) out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], self.y)) + np.testing.assert_allclose(out[0], self.y, rtol=1e-05) def check_dynamic_api(self): paddle.disable_static() x = paddle.to_tensor(self.x) y = functional.sigmoid(x) - self.assertTrue(np.allclose(y.numpy(), self.y)) + np.testing.assert_allclose(y.numpy(), self.y, rtol=1e-05) def test_check_api(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_nonzero_api.py index b107823277e..ef234930960 100644 --- a/python/paddle/fluid/tests/unittests/test_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_nonzero_api.py @@ -38,7 +38,7 @@ class TestNonZeroAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.array([[0, 0], [1, 1]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) data = np.array([True, True, False]) with program_guard(Program(), Program()): @@ -52,7 +52,7 @@ class TestNonZeroAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.array([[0], [1]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_nonzero_api(self): data = np.array([[True, False], [False, True]]) @@ -64,7 +64,7 @@ class TestNonZeroAPI(unittest.TestCase): fetch_list=[y.name], return_numpy=False) expect_out = np.array([[0, 0], [1, 1]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) data = np.array([True, True, False]) with program_guard(Program(), Program()): @@ -75,7 +75,7 @@ class TestNonZeroAPI(unittest.TestCase): 
fetch_list=[y.name], return_numpy=False) expect_out = np.array([[0], [1]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): data_x = np.array([[True, False], [False, True]]) diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py index ebcaf26955e..4d24affae72 100644 --- a/python/paddle/fluid/tests/unittests/test_normalize.py +++ b/python/paddle/fluid/tests/unittests/test_normalize.py @@ -43,17 +43,17 @@ class TestNNFunctionalNormalize(unittest.TestCase): def run_imperative(self): x = paddle.to_tensor(self.input_np) y = F.normalize(x) - self.assertTrue(np.allclose(y.numpy(), self.expected0)) + np.testing.assert_allclose(y.numpy(), self.expected0, rtol=1e-05) y = F.normalize(x, p=1.5) - self.assertTrue(np.allclose(y.numpy(), self.expected1)) + np.testing.assert_allclose(y.numpy(), self.expected1, rtol=1e-05) y = F.normalize(x, axis=0) - self.assertTrue(np.allclose(y.numpy(), self.expected2)) + np.testing.assert_allclose(y.numpy(), self.expected2, rtol=1e-05) x = paddle.to_tensor(self.input_np2) y = F.normalize(x, axis=0) - self.assertTrue(np.allclose(y.numpy(), self.expected3)) + np.testing.assert_allclose(y.numpy(), self.expected3, rtol=1e-05) self.assertRaises(BaseException, F.normalize, x) @@ -75,11 +75,11 @@ class TestNNFunctionalNormalize(unittest.TestCase): }, fetch_list=[result0, result1, result2, result4]) - self.assertTrue(np.allclose(static_result[0], self.expected0)) - self.assertTrue(np.allclose(static_result[1], self.expected1)) - self.assertTrue(np.allclose(static_result[2], self.expected2)) + np.testing.assert_allclose(static_result[0], self.expected0, rtol=1e-05) + np.testing.assert_allclose(static_result[1], self.expected1, rtol=1e-05) + np.testing.assert_allclose(static_result[2], self.expected2, rtol=1e-05) self.assertTrue('aaa' in result3.name) - self.assertTrue(np.allclose(static_result[3], self.expected3)) + np.testing.assert_allclose(static_result[3], self.expected3, rtol=1e-05) self.assertRaises(ValueError, F.normalize, x2) def test_cpu(self): diff --git a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py index 470dfff788b..ff310f0a535 100644 --- a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py @@ -53,7 +53,11 @@ class TestNpairLossOp(unittest.TestCase): self.dtype = np.float32 def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.testing.assert_allclose(np.array(tensor), + np_array, + rtol=1e-05, + atol=atol, + err_msg=msg) def test_npair_loss(self): reg_lambda = 0.002 diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py index 31bbaefd165..adab8f75262 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_for_varbase.py @@ -41,7 +41,9 @@ class TestOptimizerForVarBase(unittest.TestCase): z.backward() opt.step() - self.assertTrue(np.allclose(x.numpy(), np.full([2, 3], -self.lr))) + np.testing.assert_allclose(x.numpy(), + np.full([2, 3], -self.lr), + rtol=1e-05) def run_optimizer_minimize_with_varbase_list_input(self, optimizer): x = paddle.zeros([2, 3]) @@ -55,7 +57,9 @@ class 
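# Why every rewritten call passes rtol=1e-05 explicitly: np.allclose
# defaults to rtol=1e-05, atol=1e-08, but np.testing.assert_allclose
# defaults to rtol=1e-07, atol=0. Spelling out rtol=1e-05 preserves the
# old relative tolerance; dropping allclose's small default atol makes
# the check slightly stricter for values near zero. Sketch:
import numpy as np
a, b = np.array([0.0]), np.array([5e-09])
assert np.allclose(a, b)  # passes via allclose's default atol=1e-08
try:
    np.testing.assert_allclose(a, b, rtol=1e-05)  # atol defaults to 0
except AssertionError:
    pass  # stricter near zero, by design of the defaults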
TestOptimizerForVarBase(unittest.TestCase): z.backward() opt.minimize(z) - self.assertTrue(np.allclose(x.numpy(), np.full([2, 3], -self.lr))) + np.testing.assert_allclose(x.numpy(), + np.full([2, 3], -self.lr), + rtol=1e-05) def func_test_adam_with_varbase_list_input(self): self.run_optimizer_step_with_varbase_list_input(optimizer.Adam) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py index 4331ea8ff31..02bffd23dfb 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py @@ -218,17 +218,9 @@ class TestMultiTask(unittest.TestCase): for loss_in_switch in [True, False]: hidden_1, pre_1, loss_1 = static(self.train_data, loss_in_switch, use_cuda) - self.assertTrue( - np.allclose(hidden_1, hidden_2), - msg='static hidden is {}\ndynamic hidden is {}'.format( - hidden_1, hidden_2)) - self.assertTrue( - np.allclose(pre_1, pre_2), - msg='static prediction is {}\ndynamic prediction is {}'.format( - pre_1, pre_2)) - self.assertTrue(np.allclose(loss_1, loss_2), - msg='static loss is {}\ndynamic loss is {}'.format( - loss_1, loss_2)) + np.testing.assert_allclose(hidden_1, hidden_2, rtol=1e-05) + np.testing.assert_allclose(pre_1, pre_2, rtol=1e-05) + np.testing.assert_allclose(loss_1, loss_2, rtol=1e-05) class TestMultiOptimizersMultiCardsError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_outer.py b/python/paddle/fluid/tests/unittests/test_outer.py index cffe8a895c7..13c0b7c65b1 100644 --- a/python/paddle/fluid/tests/unittests/test_outer.py +++ b/python/paddle/fluid/tests/unittests/test_outer.py @@ -61,37 +61,37 @@ class TestMultiplyApi(unittest.TestCase): x_data = np.random.rand(2, 10, 10).astype(np.float64) y_data = np.random.rand(2, 5, 10).astype(np.float64) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test static computation graph: 2-d array x_data = np.random.rand(200, 5).astype(np.float64) y_data = np.random.rand(50, 5).astype(np.float64) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test static computation graph: 1-d array x_data = np.random.rand(50).astype(np.float64) y_data = np.random.rand(50).astype(np.float64) res = self._run_static_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 3-d array x_data = np.random.rand(5, 10, 10).astype(np.float64) y_data = np.random.rand(2, 10).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 2-d array x_data = np.random.rand(20, 50).astype(np.float64) y_data = np.random.rand(50).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test dynamic computation graph: Scalar x_data = np.random.rand(20, 10).astype(np.float32) y_data = np.random.rand(1).astype(np.float32).item() res 
= self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data), rtol=1e4)) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=10000.0) # test dynamic computation graph: 2-d array Complex x_data = np.random.rand(20, 50).astype( @@ -99,7 +99,7 @@ class TestMultiplyApi(unittest.TestCase): y_data = np.random.rand(50).astype( np.float64) + 1J * np.random.rand(50).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) # test dynamic computation graph: 3-d array Complex x_data = np.random.rand(5, 10, 10).astype( @@ -107,7 +107,7 @@ class TestMultiplyApi(unittest.TestCase): y_data = np.random.rand(2, 10).astype( np.float64) + 1J * np.random.rand(2, 10).astype(np.float64) res = self._run_dynamic_graph_case(x_data, y_data) - self.assertTrue(np.allclose(res, np.outer(x_data, y_data))) + np.testing.assert_allclose(res, np.outer(x_data, y_data), rtol=1e-05) def test_multiply(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py index b277ebbb75d..c1c0aa7defb 100644 --- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py @@ -218,7 +218,7 @@ class TestPadAPI(unittest.TestCase): fetch_list=[result]) np_out = self._get_numpy_out(input_data, pad, mode, value) - self.assertTrue(np.allclose(fetches[0], np_out)) + np.testing.assert_allclose(fetches[0], np_out, rtol=1e-05) def check_static_result_2(self, place): paddle.enable_static() @@ -243,8 +243,8 @@ class TestPadAPI(unittest.TestCase): pad, mode, data_format="NDHWC") - self.assertTrue(np.allclose(fetches[0], np_out1)) - self.assertTrue(np.allclose(fetches[1], np_out2)) + np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) + np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) def check_static_result_3(self, place): paddle.enable_static() @@ -269,8 +269,8 @@ class TestPadAPI(unittest.TestCase): pad, mode, data_format="NDHWC") - self.assertTrue(np.allclose(fetches[0], np_out1)) - self.assertTrue(np.allclose(fetches[1], np_out2)) + np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) + np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) def check_static_result_4(self, place): paddle.enable_static() @@ -295,8 +295,8 @@ class TestPadAPI(unittest.TestCase): pad, mode, data_format="NDHWC") - self.assertTrue(np.allclose(fetches[0], np_out1)) - self.assertTrue(np.allclose(fetches[1], np_out2)) + np.testing.assert_allclose(fetches[0], np_out1, rtol=1e-05) + np.testing.assert_allclose(fetches[1], np_out2, rtol=1e-05) def _get_numpy_out(self, input_data, @@ -408,9 +408,9 @@ class TestPadAPI(unittest.TestCase): value=value, data_format="NCDHW") - self.assertTrue(np.allclose(y1.numpy(), np_out1)) - self.assertTrue(np.allclose(y2.numpy(), np_out2)) - self.assertTrue(np.allclose(y3.numpy(), np_out3)) + np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) + np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) + np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05) def test_dygraph_2(self): paddle.disable_static() @@ -455,9 +455,9 @@ class TestPadAPI(unittest.TestCase): value=value, data_format="NCHW") - self.assertTrue(np.allclose(y1.numpy(), np_out1)) - self.assertTrue(np.allclose(y2.numpy(), np_out2)) - self.assertTrue(np.allclose(y3.numpy(), np_out3)) + 
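# The test_outer hunk above rewrites rtol=1e4 as rtol=10000.0: the same
# value, just spelled as a float literal. Since assert_allclose checks
# |actual - desired| <= atol + rtol * |desired|, a relative tolerance of
# 1e4 accepts almost any finite result (1e-4 was more likely intended);
# the mechanical rewrite preserves the original value rather than
# guessing intent. Demonstration with throwaway numbers:
import numpy as np
np.testing.assert_allclose(1.0, 9000.0, rtol=10000.0)  # still "close"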
np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) + np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) + np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05) def test_dygraph_3(self): paddle.disable_static() @@ -501,9 +501,9 @@ class TestPadAPI(unittest.TestCase): value=value, data_format="NCL") - self.assertTrue(np.allclose(y1.numpy(), np_out1)) - self.assertTrue(np.allclose(y2.numpy(), np_out2)) - self.assertTrue(np.allclose(y3.numpy(), np_out3)) + np.testing.assert_allclose(y1.numpy(), np_out1, rtol=1e-05) + np.testing.assert_allclose(y2.numpy(), np_out2, rtol=1e-05) + np.testing.assert_allclose(y3.numpy(), np_out3, rtol=1e-05) class TestPad1dAPI(unittest.TestCase): @@ -567,14 +567,14 @@ class TestPad1dAPI(unittest.TestCase): pad, "reflect", data_format="NCL") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) np_out = self._get_numpy_out(input_data, pad, "replicate", data_format="NCL") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) np_out = self._get_numpy_out(input_data, @@ -582,14 +582,14 @@ class TestPad1dAPI(unittest.TestCase): "constant", value=value, data_format="NCL") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) np_out = self._get_numpy_out(input_data, [pad_int] * 2, "constant", value=value, data_format="NCL") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) np_out = self._get_numpy_out(input_data, @@ -597,7 +597,7 @@ class TestPad1dAPI(unittest.TestCase): "circular", value=value, data_format="NCL") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad2dAPI(unittest.TestCase): @@ -663,14 +663,14 @@ class TestPad2dAPI(unittest.TestCase): pad, "reflect", data_format="NCHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) np_out = self._get_numpy_out(input_data, pad, "replicate", data_format="NCHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) np_out = self._get_numpy_out(input_data, @@ -678,21 +678,21 @@ class TestPad2dAPI(unittest.TestCase): "constant", value=value, data_format="NCHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) np_out = self._get_numpy_out(input_data, [pad_int] * 4, "constant", value=value, data_format="NCHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) np_out = self._get_numpy_out(input_data, pad, "circular", data_format="NCHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad3dAPI(unittest.TestCase): @@ -760,14 +760,14 @@ class TestPad3dAPI(unittest.TestCase): pad, "reflect", data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_replication(data) np_out = 
self._get_numpy_out(input_data, pad, "replicate", data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant(data) np_out = self._get_numpy_out(input_data, @@ -775,21 +775,21 @@ class TestPad3dAPI(unittest.TestCase): "constant", value=value, data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_constant_int(data) np_out = self._get_numpy_out(input_data, [pad_int] * 6, "constant", value=value, data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_circular(data) np_out = self._get_numpy_out(input_data, pad, "circular", data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) def test_pad_tensor(self): paddle.disable_static() @@ -812,14 +812,14 @@ class TestPad3dAPI(unittest.TestCase): pad, "reflect", data_format="NCDHW") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) output = pad_reflection_ndhwc(data) np_out = self._get_numpy_out(input_data, pad, "reflect", data_format="NDHWC") - self.assertTrue(np.allclose(output.numpy(), np_out)) + np.testing.assert_allclose(output.numpy(), np_out, rtol=1e-05) class TestPad3dOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py index 0faeaec53d2..967e3c5f2e0 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py @@ -190,7 +190,9 @@ class TestDygraphDoubleGrad(TestCase): ) dx_expected = dy_expected * grad_y_np + dz_expected * grad_z_np - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), + dx_expected, + rtol=1e-05) if grad_y is not None: self.assertTrue(grad_y.stop_gradient) @@ -229,7 +231,7 @@ class TestDygraphDoubleGrad(TestCase): # Theoritical result based on math calculation dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) if not _in_legacy_dygraph(): pass @@ -242,7 +244,9 @@ class TestDygraphDoubleGrad(TestCase): 2.0 / float(numel) * (x_np + dx_expected * (x_np > 0) * 2 / float(numel))).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + np.testing.assert_allclose(x_grad_actual, + x_grad_expected, + rtol=1e-05) def test_example_with_gradient_accumulation_and_create_graph(self): with _test_eager_guard(): @@ -273,7 +277,7 @@ class TestDygraphDoubleGrad(TestCase): dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + y2.numpy()) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) if not _in_legacy_dygraph(): pass @@ -286,7 +290,9 @@ class TestDygraphDoubleGrad(TestCase): 2.0 / float(numel) * (x_np + dx_expected * (x_np > 0) * 4 / float(numel))).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + 
np.testing.assert_allclose(x_grad_actual, + x_grad_expected, + rtol=1e-05) def test_example_with_gradient_accumulation_and_no_grad_vars(self): with _test_eager_guard(): @@ -315,7 +321,7 @@ class TestDygraphDoubleGrad(TestCase): dx_expected = (1.0 / float(numel) * (np.maximum(x_np, 0) + 1) * (x_np > 0) * 2).astype('float32') - self.assertTrue(np.allclose(dx_actual.numpy(), dx_expected)) + np.testing.assert_allclose(dx_actual.numpy(), dx_expected, rtol=1e-05) if not _in_legacy_dygraph(): pass @@ -325,7 +331,9 @@ class TestDygraphDoubleGrad(TestCase): x_grad_actual = x.gradient() x_grad_expected = (2.0 * x_np / float(numel)).astype('float32') - self.assertTrue(np.allclose(x_grad_actual, x_grad_expected)) + np.testing.assert_allclose(x_grad_actual, + x_grad_expected, + rtol=1e-05) def test_example_with_gradient_accumulation_and_not_create_graph(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py index bdcc302de54..5a65da5940c 100644 --- a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py +++ b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py @@ -189,12 +189,15 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(legacy_ret.shape, excepted_value.shape) - self.assertTrue( - np.allclose(static_ret, excepted_value)) - self.assertTrue( - np.allclose(dygraph_ret, excepted_value)) - self.assertTrue( - np.allclose(legacy_ret, excepted_value)) + np.testing.assert_allclose(static_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(dygraph_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(legacy_ret, + excepted_value, + rtol=1e-05) static_functional_ret = test_static(place, x_np, @@ -224,15 +227,15 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(legacy_functional_ret.shape, excepted_value.shape) - self.assertTrue( - np.allclose(static_functional_ret, - excepted_value)) - self.assertTrue( - np.allclose(dygraph_functional_ret, - excepted_value)) - self.assertTrue( - np.allclose(legacy_functional_ret, - excepted_value)) + np.testing.assert_allclose(static_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(dygraph_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(legacy_functional_ret, + excepted_value, + rtol=1e-05) def test_pairwise_distance_broadcast_1(self): shape_x = [100, 100] @@ -266,9 +269,9 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(dygraph_ret.shape, excepted_value.shape) self.assertEqual(legacy_ret.shape, excepted_value.shape) - self.assertTrue(np.allclose(static_ret, excepted_value)) - self.assertTrue(np.allclose(dygraph_ret, excepted_value)) - self.assertTrue(np.allclose(legacy_ret, excepted_value)) + np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05) + np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05) + np.testing.assert_allclose(legacy_ret, excepted_value, rtol=1e-05) static_functional_ret = test_static(place=place, x_np=x_np, @@ -293,9 +296,15 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape) self.assertEqual(legacy_functional_ret.shape, excepted_value.shape) - self.assertTrue(np.allclose(static_functional_ret, excepted_value)) - self.assertTrue(np.allclose(dygraph_functional_ret, excepted_value)) - self.assertTrue(np.allclose(legacy_functional_ret, excepted_value)) + 
np.testing.assert_allclose(static_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(dygraph_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(legacy_functional_ret, + excepted_value, + rtol=1e-05) def test_pairwise_distance_broadcast_2(self): shape_x = [100, 100] @@ -329,9 +338,9 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(dygraph_ret.shape, excepted_value.shape) self.assertEqual(legacy_ret.shape, excepted_value.shape) - self.assertTrue(np.allclose(static_ret, excepted_value)) - self.assertTrue(np.allclose(dygraph_ret, excepted_value)) - self.assertTrue(np.allclose(legacy_ret, excepted_value)) + np.testing.assert_allclose(static_ret, excepted_value, rtol=1e-05) + np.testing.assert_allclose(dygraph_ret, excepted_value, rtol=1e-05) + np.testing.assert_allclose(legacy_ret, excepted_value, rtol=1e-05) static_functional_ret = test_static(place=place, x_np=x_np, @@ -356,9 +365,15 @@ class TestPairwiseDistance(unittest.TestCase): self.assertEqual(dygraph_functional_ret.shape, excepted_value.shape) self.assertEqual(legacy_functional_ret.shape, excepted_value.shape) - self.assertTrue(np.allclose(static_functional_ret, excepted_value)) - self.assertTrue(np.allclose(dygraph_functional_ret, excepted_value)) - self.assertTrue(np.allclose(legacy_functional_ret, excepted_value)) + np.testing.assert_allclose(static_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(dygraph_functional_ret, + excepted_value, + rtol=1e-05) + np.testing.assert_allclose(legacy_functional_ret, + excepted_value, + rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py index f2a753a9874..f58797cfc17 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -273,8 +273,7 @@ class TestMNISTNoReduce(unittest.TestCase): self.assertEqual(len(grads_multi_place), len(grads_single_place)) for g1, g2 in zip(grads_multi_place, grads_single_place): - self.assertTrue(np.allclose(g1, g2), - 'g1 = {}\ng2 = {}\n'.format(g1, g2)) + np.testing.assert_allclose(g1, g2, rtol=1e-05) def split_feed(self, feed, n): image = feed['image'] diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py index 00bacaa58e8..88eb37c219f 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_run_cinn.py @@ -117,7 +117,10 @@ class TestParallelExecutorRunCinn(unittest.TestCase): cinn_losses = train(self.tmpdir, "paddle") set_cinn_flag(False) pd_losses = train(self.tmpdir, "cinn") - self.assertTrue(np.allclose(cinn_losses, pd_losses, atol=1e-5)) + np.testing.assert_allclose(cinn_losses, + pd_losses, + rtol=1e-05, + atol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py index e9f4e679d5a..0c3e47ea37f 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py @@ -70,10 +70,10 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): if 
math.isnan(float(avg_train_loss_val)): sys.exit("got NaN loss, training failed.") - self.assertTrue( - np.allclose(train_loss, test_loss, - atol=1e-2), "Train loss: " + str(train_loss) + - "\n Test loss:" + str(test_loss)) + np.testing.assert_allclose(train_loss, + test_loss, + rtol=1e-05, + atol=0.01) def test_parallel_testing(self): build_strategy = fluid.BuildStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py index 05b158624dd..5ea42f5c496 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle.py @@ -182,11 +182,13 @@ class TestPixelShuffleAPI(unittest.TestCase): data_format=data_format) result = pixel_shuffle(paddle.to_tensor(x)) - self.assertTrue(np.allclose(result.numpy(), npresult)) + np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) result_functional = F.pixel_shuffle(paddle.to_tensor(x), 3, data_format) - self.assertTrue(np.allclose(result_functional.numpy(), npresult)) + np.testing.assert_allclose(result_functional.numpy(), + npresult, + rtol=1e-05) def test_dygraph1(self): self.run_dygraph(3, "NCHW") diff --git a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py index 1ae2c016e25..5ab545222d8 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py @@ -204,11 +204,13 @@ class TestPixelUnshuffleAPI(unittest.TestCase): data_format=data_format) result = pixel_unshuffle(paddle.to_tensor(x)) - self.assertTrue(np.allclose(result.numpy(), npresult)) + np.testing.assert_allclose(result.numpy(), npresult, rtol=1e-05) result_functional = F.pixel_unshuffle(paddle.to_tensor(x), 3, data_format) - self.assertTrue(np.allclose(result_functional.numpy(), npresult)) + np.testing.assert_allclose(result_functional.numpy(), + npresult, + rtol=1e-05) pixel_unshuffle_str = 'downscale_factor={}'.format(down_factor) if data_format != 'NCHW': diff --git a/python/paddle/fluid/tests/unittests/test_poisson_op.py b/python/paddle/fluid/tests/unittests/test_poisson_op.py index 764ba03d401..26fb1e1634d 100644 --- a/python/paddle/fluid/tests/unittests/test_poisson_op.py +++ b/python/paddle/fluid/tests/unittests/test_poisson_op.py @@ -56,8 +56,7 @@ class TestPoissonOp1(OpTest): def verify_output(self, outs): hist, prob = output_hist(np.array(outs[0]), self.lam, self.a, self.b) - self.assertTrue(np.allclose(hist, prob, rtol=0.01), - "actual: {}, expected: {}".format(hist, prob)) + np.testing.assert_allclose(hist, prob, rtol=0.01) def test_check_output(self): self.check_output_customized(self.verify_output) diff --git a/python/paddle/fluid/tests/unittests/test_pool1d_api.py b/python/paddle/fluid/tests/unittests/test_pool1d_api.py index 2dd26bef9d1..665cdfbd31c 100644 --- a/python/paddle/fluid/tests/unittests/test_pool1d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool1d_api.py @@ -128,7 +128,7 @@ class TestPool1D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -141,13 +141,13 @@ class TestPool1D_API(unittest.TestCase): strides=[2], paddings=[0]) - self.assertTrue(np.allclose(result.numpy(), result_np)) + 
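# The train/test-loss hunk above combines both tolerances. assert_allclose
# passes when |actual - desired| <= atol + rtol * |desired|, so atol=0.01
# dominates for losses of order one while rtol=1e-05 guards larger
# magnitudes. Sketch with made-up loss values:
import numpy as np
train_loss, test_loss = np.array([0.500]), np.array([0.505])
np.testing.assert_allclose(train_loss, test_loss, rtol=1e-05, atol=0.01)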
np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool1d_dg = paddle.nn.layer.AvgPool1D(kernel_size=2, stride=None, padding=0) result = avg_pool1d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): @@ -165,7 +165,7 @@ class TestPool1D_API(unittest.TestCase): paddings=[1], exclusive=False) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool1d_dg = paddle.nn.AvgPool1D(kernel_size=2, stride=None, @@ -173,7 +173,7 @@ class TestPool1D_API(unittest.TestCase): exclusive=True) result = avg_pool1d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -190,7 +190,7 @@ class TestPool1D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -203,13 +203,13 @@ class TestPool1D_API(unittest.TestCase): strides=[2], paddings=[0]) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool1d_dg = paddle.nn.layer.MaxPool1D(kernel_size=2, stride=None, padding=0) result = max_pool1d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_return_index_results(self, place): with fluid.dygraph.guard(place): @@ -226,13 +226,13 @@ class TestPool1D_API(unittest.TestCase): strides=[2], paddings=[0]) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool1d_dg = paddle.nn.layer.MaxPool1D(kernel_size=2, stride=None, padding=0) result = max_pool1d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding_same(self, place): with fluid.dygraph.guard(place): @@ -248,7 +248,7 @@ class TestPool1D_API(unittest.TestCase): strides=[2], paddings=[0]) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_same(self, place): with fluid.dygraph.guard(place): @@ -264,7 +264,7 @@ class TestPool1D_API(unittest.TestCase): strides=[2], paddings=[0]) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def test_pool1d(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_api.py b/python/paddle/fluid/tests/unittests/test_pool2d_api.py index b17c0ea0391..4a4e03ed99a 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_api.py @@ -49,7 +49,7 @@ class TestPool2D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def 
check_avg_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -62,13 +62,13 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], pool_type='avg') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, stride=2, padding=0) result = avg_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): @@ -86,14 +86,14 @@ class TestPool2D_API(unittest.TestCase): paddings=[1, 1], ceil_mode=False, exclusive=False) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, stride=2, padding=1, ceil_mode=False) result = avg_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_ceilmode_results(self, place): with fluid.dygraph.guard(place): @@ -110,14 +110,14 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], ceil_mode=True) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, stride=2, padding=0, ceil_mode=True) result = avg_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -137,7 +137,7 @@ class TestPool2D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -154,13 +154,13 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], pool_type='max') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, stride=2, padding=0) result = max_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_nhwc_results(self, place): with fluid.dygraph.guard(place): @@ -179,9 +179,10 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], pool_type='max') - self.assertTrue( - np.allclose(np.transpose(result.numpy(), [0, 3, 1, 2]), - result_np)) + np.testing.assert_allclose(np.transpose(result.numpy(), + [0, 3, 1, 2]), + result_np, + rtol=1e-05) def check_max_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): @@ -199,14 +200,14 @@ class TestPool2D_API(unittest.TestCase): paddings=[1, 1], ceil_mode=False, exclusive=False) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, stride=2, padding=1, ceil_mode=False) result = max_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + 
np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_ceilmode_results(self, place): with fluid.dygraph.guard(place): @@ -223,14 +224,14 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], ceil_mode=True) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, stride=2, padding=0, ceil_mode=True) result = max_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_stride_is_none(self, place): with fluid.dygraph.guard(place): @@ -248,13 +249,13 @@ class TestPool2D_API(unittest.TestCase): paddings=[0, 0], pool_type='max', padding_algorithm="SAME") - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, stride=2, padding=0) result = max_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_stride_is_none(self, place): with fluid.dygraph.guard(place): @@ -271,13 +272,13 @@ class TestPool2D_API(unittest.TestCase): paddings=[0, 0], pool_type='avg', padding_algorithm="SAME") - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, stride=2, padding=0) result = avg_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding(self, place): with fluid.dygraph.guard(place): @@ -295,13 +296,13 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], pool_type='max') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool2d_dg = paddle.nn.layer.MaxPool2D(kernel_size=2, stride=2, padding=0) result = max_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_divisor(self, place): with fluid.dygraph.guard(place): @@ -319,13 +320,13 @@ class TestPool2D_API(unittest.TestCase): strides=[2, 2], paddings=[0, 0], pool_type='avg') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool2d_dg = paddle.nn.layer.AvgPool2D(kernel_size=2, stride=2, padding=0) result = avg_pool2d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def test_pool2d(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 81fa00986d7..4b3f6ca77c8 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -1442,7 +1442,7 @@ class TestDygraphPool2DAPI(unittest.TestCase): paddings=[0, 0], pool_type='max', data_format='NHWC') - self.assertTrue(np.allclose(out1.numpy(), out2)) + np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) def test_lower_case(self): with fluid.dygraph.guard(): @@ -1459,7 +1459,7 @@ class TestDygraphPool2DAPI(unittest.TestCase): 
paddings=[0, 0], pool_type='max', data_format='NHWC') - self.assertTrue(np.allclose(out1.numpy(), out2)) + np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) def test_upper_case(self): with fluid.dygraph.guard(): @@ -1476,7 +1476,7 @@ class TestDygraphPool2DAPI(unittest.TestCase): paddings=[0, 0], pool_type='max', data_format='NHWC') - self.assertTrue(np.allclose(out1.numpy(), out2)) + np.testing.assert_allclose(out1.numpy(), out2, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_api.py b/python/paddle/fluid/tests/unittests/test_pool3d_api.py index 3ecfb06bb58..cd874dfa131 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_api.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_api.py @@ -53,7 +53,7 @@ class TestPool3D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -68,13 +68,13 @@ class TestPool3D_API(unittest.TestCase): pool_type='avg', padding_algorithm="SAME") - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, stride=None, padding="SAME") result = avg_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): @@ -94,7 +94,7 @@ class TestPool3D_API(unittest.TestCase): ceil_mode=False, exclusive=False) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, stride=None, @@ -102,7 +102,7 @@ class TestPool3D_API(unittest.TestCase): ceil_mode=False, exclusive=True) result = avg_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_ceilmode_results(self, place): with fluid.dygraph.guard(place): @@ -120,14 +120,14 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], ceil_mode=True) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, stride=None, padding=0, ceil_mode=True) result = avg_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -147,7 +147,7 @@ class TestPool3D_API(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], result_np)) + np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): with fluid.dygraph.guard(place): @@ -161,12 +161,12 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], pool_type='max') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, stride=None, padding=0) result = 
max_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_ndhwc_results(self, place): with fluid.dygraph.guard(place): @@ -186,9 +186,10 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], pool_type='max') - self.assertTrue( - np.allclose(np.transpose(result.numpy(), [0, 4, 1, 2, 3]), - result_np)) + np.testing.assert_allclose(np.transpose(result.numpy(), + [0, 4, 1, 2, 3]), + result_np, + rtol=1e-05) def check_max_dygraph_ceilmode_results(self, place): with fluid.dygraph.guard(place): @@ -206,14 +207,14 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], ceil_mode=True) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, stride=None, padding=0, ceil_mode=True) result = max_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding_results(self, place): with fluid.dygraph.guard(place): @@ -231,14 +232,14 @@ class TestPool3D_API(unittest.TestCase): paddings=[1, 1, 1], ceil_mode=False) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, stride=None, padding=1, ceil_mode=False) result = max_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_stride_is_none(self, place): with fluid.dygraph.guard(place): @@ -257,12 +258,12 @@ class TestPool3D_API(unittest.TestCase): pool_type='max', padding_algorithm="SAME") - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, stride=2, padding=0) result = max_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding(self, place): with fluid.dygraph.guard(place): @@ -277,16 +278,16 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], pool_type='max') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) max_pool3d_dg = paddle.nn.layer.MaxPool3D(kernel_size=2, stride=2, padding=0) result = max_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) padding = [0, 0, 0, 0, 0, 0] result = max_pool3d(input, kernel_size=2, stride=2, padding=padding) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_divisor(self, place): with fluid.dygraph.guard(place): @@ -305,12 +306,12 @@ class TestPool3D_API(unittest.TestCase): paddings=[0, 0, 0], pool_type='avg') - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) avg_pool3d_dg = paddle.nn.layer.AvgPool3D(kernel_size=2, stride=2, padding=0) result = avg_pool3d_dg(input) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) padding = [0, 0, 0, 0, 0, 0] result = avg_pool3d(input, 
@@ -318,7 +319,7 @@ class TestPool3D_API(unittest.TestCase): stride=2, padding=padding, divisor_override=8) - self.assertTrue(np.allclose(result.numpy(), result_np)) + np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def test_pool3d(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/test_pow.py b/python/paddle/fluid/tests/unittests/test_pow.py index 5da9f45fc10..f0c7b19e0a2 100755 --- a/python/paddle/fluid/tests/unittests/test_pow.py +++ b/python/paddle/fluid/tests/unittests/test_pow.py @@ -77,61 +77,61 @@ class TestPowerAPI(unittest.TestCase): x = (np.random.rand(*dims) * 10).astype(np.float64) y = np.random.rand() * 10 res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d float tensor ** int scalar dims = (np.random.randint(200, 300), ) x = (np.random.rand(*dims) * 10).astype(np.float64) y = int(np.random.rand() * 10) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) x = (np.random.rand(*dims) * 10).astype(np.int64) y = int(np.random.rand() * 10) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d float tensor ** 1-d float tensor dims = (np.random.randint(200, 300), ) x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(*dims) * 10).astype(np.float64) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor dims = (np.random.randint(200, 300), ) x = (np.random.rand(*dims) * 10).astype(np.int64) y = (np.random.rand(*dims) * 10).astype(np.int64) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor dims = (np.random.randint(200, 300), ) x = (np.random.rand(*dims) * 10).astype(np.int32) y = (np.random.rand(*dims) * 10).astype(np.int32) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test 1-d int tensor ** 1-d int tensor dims = (np.random.randint(200, 300), ) x = (np.random.rand(*dims) * 10).astype(np.float32) y = (np.random.rand(*dims) * 10).astype(np.float32) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = 
_run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) # test broadcast dims = (np.random.randint(1, 10), np.random.randint(5, 10), @@ -139,9 +139,9 @@ class TestPowerAPI(unittest.TestCase): x = (np.random.rand(*dims) * 10).astype(np.float64) y = (np.random.rand(dims[-1]) * 10).astype(np.float64) res = _run_power(DYNAMIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) res = _run_power(STATIC, x, y) - self.assertTrue(np.allclose(res, np.power(x, y))) + np.testing.assert_allclose(res, np.power(x, y), rtol=1e-05) class TestPowerError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py index 656601e05d1..19611a933cc 100644 --- a/python/paddle/fluid/tests/unittests/test_prod_op.py +++ b/python/paddle/fluid/tests/unittests/test_prod_op.py @@ -28,34 +28,49 @@ class TestProdOp(unittest.TestCase): input = paddle.to_tensor(self.input) dy_result = paddle.prod(input) expected_result = np.prod(self.input) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) dy_result = paddle.prod(input, axis=1) expected_result = np.prod(self.input, axis=1) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) dy_result = paddle.prod(input, axis=-1) expected_result = np.prod(self.input, axis=-1) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) dy_result = paddle.prod(input, axis=[0, 1]) expected_result = np.prod(self.input, axis=(0, 1)) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05, + atol=1e-8) dy_result = paddle.prod(input, axis=1, keepdim=True) expected_result = np.prod(self.input, axis=1, keepdims=True) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) dy_result = paddle.prod(input, axis=1, dtype='int64') expected_result = np.prod(self.input, axis=1, dtype=np.int64) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) dy_result = paddle.prod(input, axis=1, keepdim=True, dtype='int64') expected_result = np.prod(self.input, axis=1, keepdims=True, dtype=np.int64) - self.assertTrue(np.allclose(dy_result.numpy(), expected_result)) + np.testing.assert_allclose(dy_result.numpy(), + expected_result, + rtol=1e-05) def run_static(self, use_gpu=False): input = paddle.fluid.data(name='input', @@ -79,22 +94,37 @@ class TestProdOp(unittest.TestCase): ]) expected_result = np.prod(self.input) - self.assertTrue(np.allclose(static_result[0], expected_result)) + np.testing.assert_allclose(static_result[0], + expected_result, + rtol=1e-05) expected_result = np.prod(self.input, axis=1) - self.assertTrue(np.allclose(static_result[1], expected_result)) + np.testing.assert_allclose(static_result[1], + expected_result, + rtol=1e-05) expected_result = np.prod(self.input, axis=-1) - self.assertTrue(np.allclose(static_result[2], expected_result)) + np.testing.assert_allclose(static_result[2], + expected_result, + rtol=1e-05) 
expected_result = np.prod(self.input, axis=(0, 1)) - self.assertTrue(np.allclose(static_result[3], expected_result)) + np.testing.assert_allclose(static_result[3], + expected_result, + rtol=1e-05, + atol=1e-8) expected_result = np.prod(self.input, axis=1, keepdims=True) - self.assertTrue(np.allclose(static_result[4], expected_result)) + np.testing.assert_allclose(static_result[4], + expected_result, + rtol=1e-05) expected_result = np.prod(self.input, axis=1, dtype=np.int64) - self.assertTrue(np.allclose(static_result[5], expected_result)) + np.testing.assert_allclose(static_result[5], + expected_result, + rtol=1e-05) expected_result = np.prod(self.input, axis=1, keepdims=True, dtype=np.int64) - self.assertTrue(np.allclose(static_result[6], expected_result)) + np.testing.assert_allclose(static_result[6], + expected_result, + rtol=1e-05) def test_cpu(self): paddle.disable_static(place=paddle.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py index 180988a2aa5..6d7518f1b43 100644 --- a/python/paddle/fluid/tests/unittests/test_prune.py +++ b/python/paddle/fluid/tests/unittests/test_prune.py @@ -598,7 +598,7 @@ class TestExecutorRunAutoPrune(unittest.TestCase): fetch_list=[loss1.name], use_prune=False) weight2 = np.array(scope.find_var(w1_param_attrs.name).get_tensor()) - self.assertTrue(np.allclose(weight1, weight2)) + np.testing.assert_allclose(weight1, weight2, rtol=1e-05) def test_prune_program_with_tupe_in_fetch_list(self): ''' diff --git a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py index 3e3529b2240..2fecf667982 100644 --- a/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_psroi_pool_op.py @@ -177,7 +177,7 @@ class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): output_size).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7) - self.assertTrue(np.allclose(out, expect_out)) + np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_output_size_is_tuple(): output_size = (7, 7) @@ -187,7 +187,7 @@ class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): output_size).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7) - self.assertTrue(np.allclose(out, expect_out)) + np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_dytype_is_float64(): output_size = (7, 7) @@ -197,7 +197,7 @@ class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase): paddle.to_tensor(self.boxes_num, 'int32'), output_size).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7) - self.assertTrue(np.allclose(out, expect_out)) + np.testing.assert_allclose(out, expect_out, rtol=1e-05) places = ['cpu'] if paddle.fluid.core.is_compiled_with_cuda(): @@ -226,7 +226,7 @@ class TestPSROIPoolDynamicClassAPI(unittest.TestCase): paddle.to_tensor(self.boxes_num)).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8) - self.assertTrue(np.allclose(out, expect_out)) + np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_output_size_is_tuple(): psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1) @@ -235,7 +235,7 @@ class TestPSROIPoolDynamicClassAPI(unittest.TestCase): paddle.to_tensor(self.boxes_num)).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8) - self.assertTrue(np.allclose(out, expect_out)) + 
np.testing.assert_allclose(out, expect_out, rtol=1e-05) def test_dytype_is_float64(): psroi_pool_module = paddle.vision.ops.PSRoIPool(8, 1.1) @@ -245,7 +245,7 @@ class TestPSROIPoolDynamicClassAPI(unittest.TestCase): 'int32')).numpy() expect_out = calc_psroi_pool(self.x, self.boxes, self.boxes_num, 2, 1.1, 8, 8) - self.assertTrue(np.allclose(out, expect_out)) + np.testing.assert_allclose(out, expect_out, rtol=1e-05) paddle.disable_static() places = ['cpu'] diff --git a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py index bbc383eaf61..310a46b6899 100644 --- a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py @@ -107,7 +107,7 @@ class TestPutAlongAxisAPI(unittest.TestCase): out_ref = self.x_np for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out, out_ref, rtol=0.001) for place in self.place: run(place) @@ -125,8 +125,7 @@ class TestPutAlongAxisAPI(unittest.TestCase): np.put_along_axis(self.x_np, self.index_np, self.value_np, self.axis)) out_ref = self.x_np - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), - True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) # for ci coverage, numpy put_along_axis did not support argument of 'reduce' paddle.put_along_axis(x_tensor, index_tensor, value_tensor, @@ -154,8 +153,7 @@ class TestPutAlongAxisAPI(unittest.TestCase): self.axis)) out_ref = self.x_np - self.assertEqual(np.allclose(x_tensor.numpy(), out_ref, rtol=1e-03), - True) + np.testing.assert_allclose(x_tensor.numpy(), out_ref, rtol=0.001) paddle.enable_static() for place in self.place: diff --git a/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py b/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py index ac5ca3d9a1b..3144e5da556 100644 --- a/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py +++ b/python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py @@ -28,7 +28,7 @@ class TestBF16DataType(unittest.TestCase): b_fp32 = b_bf16.astype(np.float32) c_fp32 = np.matmul(a_fp32, b_fp32) - self.assertTrue(np.allclose(c_bf16, c_fp32)) + np.testing.assert_allclose(c_bf16, c_fp32, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_qr_op.py b/python/paddle/fluid/tests/unittests/test_qr_op.py index 290ce391512..acc5ea50cce 100644 --- a/python/paddle/fluid/tests/unittests/test_qr_op.py +++ b/python/paddle/fluid/tests/unittests/test_qr_op.py @@ -168,11 +168,11 @@ class TestQrAPI(unittest.TestCase): x = paddle.to_tensor(a, dtype=dtype) if mode == "r": r = paddle.linalg.qr(x, mode=mode) - self.assertTrue(np.allclose(r, np_r, atol=1e-5)) + np.testing.assert_allclose(r, np_r, rtol=1e-05, atol=1e-05) else: q, r = paddle.linalg.qr(x, mode=mode) - self.assertTrue(np.allclose(q, np_q, atol=1e-5)) - self.assertTrue(np.allclose(r, np_r, atol=1e-5)) + np.testing.assert_allclose(q, np_q, rtol=1e-05, atol=1e-05) + np.testing.assert_allclose(r, np_r, rtol=1e-05, atol=1e-05) tensor_shapes = [ (3, 5), @@ -239,18 +239,24 @@ class TestQrAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": a}, fetch_list=[r]) - self.assertTrue(np.allclose(fetches[0], np_r, - atol=1e-5)) + np.testing.assert_allclose(fetches[0], + np_r, + rtol=1e-05, + atol=1e-05) else: q, r = paddle.linalg.qr(x, mode=mode) exe = fluid.Executor(place) fetches = 
exe.run(fluid.default_main_program(), feed={"input": a}, fetch_list=[q, r]) - self.assertTrue(np.allclose(fetches[0], np_q, - atol=1e-5)) - self.assertTrue(np.allclose(fetches[1], np_r, - atol=1e-5)) + np.testing.assert_allclose(fetches[0], + np_q, + rtol=1e-05, + atol=1e-05) + np.testing.assert_allclose(fetches[1], + np_r, + rtol=1e-05, + atol=1e-05) tensor_shapes = [ (3, 5), diff --git a/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py b/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py index 3831abd1673..96e0fdbb362 100644 --- a/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py +++ b/python/paddle/fluid/tests/unittests/test_quantile_and_nanquantile.py @@ -1,274 +1,282 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import numpy as np -import paddle - -API_list = [(paddle.quantile, np.quantile), - (paddle.nanquantile, np.nanquantile)] - - -class TestQuantileAndNanquantile(unittest.TestCase): - """ - This class is used for numerical precision testing. If there is - a corresponding numpy API, the precision comparison can be performed directly. - Otherwise, it needs to be verified by numpy implementated function. - """ - - def setUp(self): - self.input_data = np.random.rand(4, 7, 6) - - # Test correctness when q and axis are set. - def test_single_q(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0.5, axis=2) - np_res = res_func(inp, q=0.5, axis=2) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 1, 2] = np.nan - - # Test correctness for default axis. - def test_with_no_axis(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0.35) - np_res = res_func(inp, q=0.35) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 2, 1] = np.nan - inp[0, 1, 2] = np.nan - - # Test correctness for multiple axis. - def test_with_multi_axis(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0.75, axis=[0, 2]) - np_res = res_func(inp, q=0.75, axis=[0, 2]) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 5, 3] = np.nan - inp[0, 6, 2] = np.nan - - # Test correctness when keepdim is set. - def test_with_keepdim(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0.35, axis=2, keepdim=True) - np_res = res_func(inp, q=0.35, axis=2, keepdims=True) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 3, 4] = np.nan - - # Test correctness when all parameters are set. 
- def test_with_keepdim_and_multiple_axis(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0.1, axis=[1, 2], keepdim=True) - np_res = res_func(inp, q=0.1, axis=[1, 2], keepdims=True) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 6, 3] = np.nan - - # Test correctness when q = 0. - def test_with_boundary_q(self): - inp = self.input_data - for (func, res_func) in API_list: - x = paddle.to_tensor(inp) - paddle_res = func(x, q=0, axis=1) - np_res = res_func(inp, q=0, axis=1) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - inp[0, 2, 5] = np.nan - - # Test correctness when input includes NaN. - def test_quantile_include_NaN(self): - input_data = np.random.randn(2, 3, 4) - input_data[0, 1, 1] = np.nan - x = paddle.to_tensor(input_data) - paddle_res = paddle.quantile(x, q=0.35, axis=0) - np_res = np.quantile(input_data, q=0.35, axis=0) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res, equal_nan=True)) - - # Test correctness when input filled with NaN. - def test_nanquantile_all_NaN(self): - input_data = np.full(shape=[2, 3], fill_value=np.nan) - input_data[0, 2] = 0 - x = paddle.to_tensor(input_data) - paddle_res = paddle.nanquantile(x, q=0.35, axis=0) - np_res = np.nanquantile(input_data, q=0.35, axis=0) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res, equal_nan=True)) - - -class TestMuitlpleQ(unittest.TestCase): - """ - This class is used to test multiple input of q. - """ - - def setUp(self): - self.input_data = np.random.rand(5, 3, 4) - - def test_quantile(self): - x = paddle.to_tensor(self.input_data) - paddle_res = paddle.quantile(x, q=[0.3, 0.44], axis=-2) - np_res = np.quantile(self.input_data, q=[0.3, 0.44], axis=-2) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - - def test_quantile_multiple_axis(self): - x = paddle.to_tensor(self.input_data) - paddle_res = paddle.quantile(x, q=[0.2, 0.67], axis=[1, -1]) - np_res = np.quantile(self.input_data, q=[0.2, 0.67], axis=[1, -1]) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - - def test_quantile_multiple_axis_keepdim(self): - x = paddle.to_tensor(self.input_data) - paddle_res = paddle.quantile(x, - q=[0.1, 0.2, 0.3], - axis=[1, 2], - keepdim=True) - np_res = np.quantile(self.input_data, - q=[0.1, 0.2, 0.3], - axis=[1, 2], - keepdims=True) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - - -class TestError(unittest.TestCase): - """ - This class is used to test that exceptions are thrown correctly. - Validity of all parameter values and types should be considered. 
- """ - - def setUp(self): - self.x = paddle.randn((2, 3, 4)) - - def test_errors(self): - # Test error when q > 1 - def test_q_range_error_1(): - paddle_res = paddle.quantile(self.x, q=1.5) - - self.assertRaises(ValueError, test_q_range_error_1) - - # Test error when q < 0 - def test_q_range_error_2(): - paddle_res = paddle.quantile(self.x, q=[0.2, -0.3]) - - self.assertRaises(ValueError, test_q_range_error_2) - - # Test error with no valid q - def test_q_range_error_3(): - paddle_res = paddle.quantile(self.x, q=[]) - - self.assertRaises(ValueError, test_q_range_error_3) - - # Test error when x is not Tensor - def test_x_type_error(): - x = [1, 3, 4] - paddle_res = paddle.quantile(x, q=0.9) - - self.assertRaises(TypeError, test_x_type_error) - - # Test error when scalar axis is not int - def test_axis_type_error_1(): - paddle_res = paddle.quantile(self.x, q=0.4, axis=0.4) - - self.assertRaises(ValueError, test_axis_type_error_1) - - # Test error when axis in List is not int - def test_axis_type_error_2(): - paddle_res = paddle.quantile(self.x, q=0.4, axis=[1, 0.4]) - - self.assertRaises(ValueError, test_axis_type_error_2) - - # Test error when axis not in [-D, D) - def test_axis_value_error_1(): - paddle_res = paddle.quantile(self.x, q=0.4, axis=10) - - self.assertRaises(ValueError, test_axis_value_error_1) - - # Test error when axis not in [-D, D) - def test_axis_value_error_2(): - paddle_res = paddle.quantile(self.x, q=0.4, axis=[1, -10]) - - self.assertRaises(ValueError, test_axis_value_error_2) - - # Test error with no valid axis - def test_axis_value_error_3(): - paddle_res = paddle.quantile(self.x, q=0.4, axis=[]) - - self.assertRaises(ValueError, test_axis_value_error_3) - - -class TestQuantileRuntime(unittest.TestCase): - """ - This class is used to test the API could run correctly with - different devices, different data types, and dygraph/static mode. 
- """ - - def setUp(self): - self.input_data = np.random.rand(4, 7) - self.dtypes = ['float32', 'float64'] - self.devices = ['cpu'] - if paddle.device.is_compiled_with_cuda(): - self.devices.append('gpu') - - def test_dygraph(self): - paddle.disable_static() - for (func, res_func) in API_list: - for device in self.devices: - # Check different devices - paddle.set_device(device) - for dtype in self.dtypes: - # Check different dtypes - np_input_data = self.input_data.astype(dtype) - x = paddle.to_tensor(np_input_data, dtype=dtype) - paddle_res = func(x, q=0.5, axis=1) - np_res = res_func(np_input_data, q=0.5, axis=1) - self.assertTrue(np.allclose(paddle_res.numpy(), np_res)) - - def test_static(self): - paddle.enable_static() - for (func, res_func) in API_list: - for device in self.devices: - x = paddle.static.data(name="x", - shape=self.input_data.shape, - dtype=paddle.float32) - x_fp64 = paddle.static.data(name="x_fp64", - shape=self.input_data.shape, - dtype=paddle.float64) - - results = func(x, q=0.5, axis=1) - np_input_data = self.input_data.astype('float32') - results_fp64 = func(x_fp64, q=0.5, axis=1) - np_input_data_fp64 = self.input_data.astype('float64') - - exe = paddle.static.Executor(device) - paddle_res, paddle_res_fp64 = exe.run( - paddle.static.default_main_program(), - feed={ - "x": np_input_data, - "x_fp64": np_input_data_fp64 - }, - fetch_list=[results, results_fp64]) - np_res = res_func(np_input_data, q=0.5, axis=1) - np_res_fp64 = res_func(np_input_data_fp64, q=0.5, axis=1) - self.assertTrue( - np.allclose(paddle_res, np_res) - and np.allclose(paddle_res_fp64, np_res_fp64)) - - -if __name__ == '__main__': - unittest.main() +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import paddle + +API_list = [(paddle.quantile, np.quantile), + (paddle.nanquantile, np.nanquantile)] + + +class TestQuantileAndNanquantile(unittest.TestCase): + """ + This class is used for numerical precision testing. If there is + a corresponding numpy API, the precision comparison can be performed directly. + Otherwise, it needs to be verified by numpy implementated function. + """ + + def setUp(self): + self.input_data = np.random.rand(4, 7, 6) + + # Test correctness when q and axis are set. + def test_single_q(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0.5, axis=2) + np_res = res_func(inp, q=0.5, axis=2) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 1, 2] = np.nan + + # Test correctness for default axis. + def test_with_no_axis(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0.35) + np_res = res_func(inp, q=0.35) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 2, 1] = np.nan + inp[0, 1, 2] = np.nan + + # Test correctness for multiple axis. 
+ def test_with_multi_axis(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0.75, axis=[0, 2]) + np_res = res_func(inp, q=0.75, axis=[0, 2]) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 5, 3] = np.nan + inp[0, 6, 2] = np.nan + + # Test correctness when keepdim is set. + def test_with_keepdim(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0.35, axis=2, keepdim=True) + np_res = res_func(inp, q=0.35, axis=2, keepdims=True) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 3, 4] = np.nan + + # Test correctness when all parameters are set. + def test_with_keepdim_and_multiple_axis(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0.1, axis=[1, 2], keepdim=True) + np_res = res_func(inp, q=0.1, axis=[1, 2], keepdims=True) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 6, 3] = np.nan + + # Test correctness when q = 0. + def test_with_boundary_q(self): + inp = self.input_data + for (func, res_func) in API_list: + x = paddle.to_tensor(inp) + paddle_res = func(x, q=0, axis=1) + np_res = res_func(inp, q=0, axis=1) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + inp[0, 2, 5] = np.nan + + # Test correctness when input includes NaN. + def test_quantile_include_NaN(self): + input_data = np.random.randn(2, 3, 4) + input_data[0, 1, 1] = np.nan + x = paddle.to_tensor(input_data) + paddle_res = paddle.quantile(x, q=0.35, axis=0) + np_res = np.quantile(input_data, q=0.35, axis=0) + np.testing.assert_allclose(paddle_res.numpy(), + np_res, + rtol=1e-05, + equal_nan=True) + + # Test correctness when input filled with NaN. + def test_nanquantile_all_NaN(self): + input_data = np.full(shape=[2, 3], fill_value=np.nan) + input_data[0, 2] = 0 + x = paddle.to_tensor(input_data) + paddle_res = paddle.nanquantile(x, q=0.35, axis=0) + np_res = np.nanquantile(input_data, q=0.35, axis=0) + np.testing.assert_allclose(paddle_res.numpy(), + np_res, + rtol=1e-05, + equal_nan=True) + + +class TestMuitlpleQ(unittest.TestCase): + """ + This class is used to test multiple inputs of q. + """ + + def setUp(self): + self.input_data = np.random.rand(5, 3, 4) + + def test_quantile(self): + x = paddle.to_tensor(self.input_data) + paddle_res = paddle.quantile(x, q=[0.3, 0.44], axis=-2) + np_res = np.quantile(self.input_data, q=[0.3, 0.44], axis=-2) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + + def test_quantile_multiple_axis(self): + x = paddle.to_tensor(self.input_data) + paddle_res = paddle.quantile(x, q=[0.2, 0.67], axis=[1, -1]) + np_res = np.quantile(self.input_data, q=[0.2, 0.67], axis=[1, -1]) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + + def test_quantile_multiple_axis_keepdim(self): + x = paddle.to_tensor(self.input_data) + paddle_res = paddle.quantile(x, + q=[0.1, 0.2, 0.3], + axis=[1, 2], + keepdim=True) + np_res = np.quantile(self.input_data, + q=[0.1, 0.2, 0.3], + axis=[1, 2], + keepdims=True) + np.testing.assert_allclose(paddle_res.numpy(), np_res, rtol=1e-05) + + +class TestError(unittest.TestCase): + """ + This class is used to test that exceptions are thrown correctly. + Validity of all parameter values and types should be considered.
+ """ + + def setUp(self): + self.x = paddle.randn((2, 3, 4)) + + def test_errors(self): + # Test error when q > 1 + def test_q_range_error_1(): + paddle_res = paddle.quantile(self.x, q=1.5) + + self.assertRaises(ValueError, test_q_range_error_1) + + # Test error when q < 0 + def test_q_range_error_2(): + paddle_res = paddle.quantile(self.x, q=[0.2, -0.3]) + + self.assertRaises(ValueError, test_q_range_error_2) + + # Test error with no valid q + def test_q_range_error_3(): + paddle_res = paddle.quantile(self.x, q=[]) + + self.assertRaises(ValueError, test_q_range_error_3) + + # Test error when x is not Tensor + def test_x_type_error(): + x = [1, 3, 4] + paddle_res = paddle.quantile(x, q=0.9) + + self.assertRaises(TypeError, test_x_type_error) + + # Test error when scalar axis is not int + def test_axis_type_error_1(): + paddle_res = paddle.quantile(self.x, q=0.4, axis=0.4) + + self.assertRaises(ValueError, test_axis_type_error_1) + + # Test error when axis in List is not int + def test_axis_type_error_2(): + paddle_res = paddle.quantile(self.x, q=0.4, axis=[1, 0.4]) + + self.assertRaises(ValueError, test_axis_type_error_2) + + # Test error when axis not in [-D, D) + def test_axis_value_error_1(): + paddle_res = paddle.quantile(self.x, q=0.4, axis=10) + + self.assertRaises(ValueError, test_axis_value_error_1) + + # Test error when axis not in [-D, D) + def test_axis_value_error_2(): + paddle_res = paddle.quantile(self.x, q=0.4, axis=[1, -10]) + + self.assertRaises(ValueError, test_axis_value_error_2) + + # Test error with no valid axis + def test_axis_value_error_3(): + paddle_res = paddle.quantile(self.x, q=0.4, axis=[]) + + self.assertRaises(ValueError, test_axis_value_error_3) + + +class TestQuantileRuntime(unittest.TestCase): + """ + This class is used to test the API could run correctly with + different devices, different data types, and dygraph/static mode. 
+ """ + + def setUp(self): + self.input_data = np.random.rand(4, 7) + self.dtypes = ['float32', 'float64'] + self.devices = ['cpu'] + if paddle.device.is_compiled_with_cuda(): + self.devices.append('gpu') + + def test_dygraph(self): + paddle.disable_static() + for (func, res_func) in API_list: + for device in self.devices: + # Check different devices + paddle.set_device(device) + for dtype in self.dtypes: + # Check different dtypes + np_input_data = self.input_data.astype(dtype) + x = paddle.to_tensor(np_input_data, dtype=dtype) + paddle_res = func(x, q=0.5, axis=1) + np_res = res_func(np_input_data, q=0.5, axis=1) + np.testing.assert_allclose(paddle_res.numpy(), + np_res, + rtol=1e-05) + + def test_static(self): + paddle.enable_static() + for (func, res_func) in API_list: + for device in self.devices: + x = paddle.static.data(name="x", + shape=self.input_data.shape, + dtype=paddle.float32) + x_fp64 = paddle.static.data(name="x_fp64", + shape=self.input_data.shape, + dtype=paddle.float64) + + results = func(x, q=0.5, axis=1) + np_input_data = self.input_data.astype('float32') + results_fp64 = func(x_fp64, q=0.5, axis=1) + np_input_data_fp64 = self.input_data.astype('float64') + + exe = paddle.static.Executor(device) + paddle_res, paddle_res_fp64 = exe.run( + paddle.static.default_main_program(), + feed={ + "x": np_input_data, + "x_fp64": np_input_data_fp64 + }, + fetch_list=[results, results_fp64]) + np_res = res_func(np_input_data, q=0.5, axis=1) + np_res_fp64 = res_func(np_input_data_fp64, q=0.5, axis=1) + self.assertTrue( + np.allclose(paddle_res, np_res) + and np.allclose(paddle_res_fp64, np_res_fp64)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_rad2deg.py b/python/paddle/fluid/tests/unittests/test_rad2deg.py index 0299884a8bb..aabd66b10cb 100644 --- a/python/paddle/fluid/tests/unittests/test_rad2deg.py +++ b/python/paddle/fluid/tests/unittests/test_rad2deg.py @@ -53,7 +53,7 @@ class TestRad2degAPI(unittest.TestCase): paddle.disable_static() x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570]) result1 = paddle.rad2deg(x1) - self.assertEqual(np.allclose(self.out_np, result1.numpy()), True) + np.testing.assert_allclose(self.out_np, result1.numpy(), rtol=1e-05) paddle.enable_static() @@ -71,7 +71,7 @@ class TestRad2degAPI2(TestRad2degAPI): x2 = paddle.to_tensor(np.pi / 2) result2 = paddle.rad2deg(x2) - self.assertEqual(np.allclose(90, result2.numpy()), True) + np.testing.assert_allclose(90, result2.numpy(), rtol=1e-05) paddle.enable_static() @@ -89,6 +89,6 @@ class TestRad2degAPI3(TestRad2degAPI): x2 = paddle.to_tensor(1) result2 = paddle.rad2deg(x2) - self.assertEqual(np.allclose(180 / np.pi, result2.numpy()), True) + np.testing.assert_allclose(180 / np.pi, result2.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py index c3b4d02af1d..0e659b96d52 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_op.py +++ b/python/paddle/fluid/tests/unittests/test_randint_op.py @@ -51,8 +51,7 @@ class TestRandintOp(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.001), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001) def test_check_output_eager(self): with _test_eager_guard(): @@ -102,8 +101,7 @@ class TestRandintOp_attr_tensorlist(OpTest): def verify_output(self, outs): 
hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.001), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001) def test_check_output_eager(self): with _test_eager_guard(): @@ -127,8 +125,7 @@ class TestRandint_attr_tensor(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.001), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.001) def test_check_output_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_random_seed.py b/python/paddle/fluid/tests/unittests/test_random_seed.py index f4d16a0a81e..b20d9a119f7 100644 --- a/python/paddle/fluid/tests/unittests/test_random_seed.py +++ b/python/paddle/fluid/tests/unittests/test_random_seed.py @@ -63,8 +63,8 @@ class TestGeneratorSeed(unittest.TestCase): x3_np = x3.numpy() if not core.is_compiled_with_cuda(): - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_uniform_random_static(self): fluid.disable_dygraph() @@ -96,8 +96,8 @@ class TestGeneratorSeed(unittest.TestCase): out2_res2 = np.array(out2[1]) if not core.is_compiled_with_cuda(): - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_dropout_dygraph(self): @@ -123,7 +123,7 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> dropout dygraph >>>>>>>") - self.assertTrue(np.allclose(y_np, y1_np)) + np.testing.assert_allclose(y_np, y1_np, rtol=1e-05) def test_gen_dropout_static(self): fluid.disable_dygraph() @@ -148,7 +148,7 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> dropout static >>>>>>>") - self.assertTrue(np.allclose(out1_np, out2_np)) + np.testing.assert_allclose(out1_np, out2_np, rtol=1e-05) def test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" @@ -169,8 +169,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> gaussian random dygraph >>>>>>>") - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_gaussian_random_static(self): fluid.disable_dygraph() @@ -203,8 +203,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> gaussian random static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_generator_randint_dygraph(self): @@ -228,8 +228,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> randint dygraph >>>>>>>") - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + 
np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_uniform_random_static_1(self): fluid.disable_dygraph() @@ -261,8 +261,8 @@ class TestGeneratorSeed(unittest.TestCase): out2_res2 = np.array(out2[1]) if not core.is_compiled_with_cuda(): - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_generator_randint_dygraph_1(self): @@ -282,8 +282,8 @@ class TestGeneratorSeed(unittest.TestCase): x2_np = x2.numpy() x3_np = x3.numpy() if not core.is_compiled_with_cuda(): - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_ranint_static(self): fluid.disable_dygraph() @@ -316,8 +316,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> randint static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_generator_randperm_dygraph(self): @@ -340,8 +340,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> randperm dygraph >>>>>>>") - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_randperm_static(self): @@ -375,8 +375,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> randperm static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_generator_sampling_id_dygraph(self): @@ -420,8 +420,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id dygraph >>>>>>>") - self.assertTrue(np.allclose(x1_np, x2_np)) - self.assertTrue(np.allclose(x_np, x3_np)) + np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) + np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_randperm_static_1(self): @@ -456,8 +456,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_TruncatedNormal_initializer(self): @@ -503,8 +503,8 @@ class TestGeneratorSeed(unittest.TestCase): if not core.is_compiled_with_cuda(): print(">>>>>>> sampling id static >>>>>>>") - self.assertTrue(np.allclose(out1_res1, out2_res1)) - self.assertTrue(np.allclose(out1_res2, out2_res2)) + np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05) + np.testing.assert_allclose(out1_res2, out2_res2, 
rtol=1e-05) self.assertTrue(not np.allclose(out1_res2, out1_res1)) diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index 0acd0ac398e..b799b116c1d 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -200,17 +200,21 @@ class RecurrentOpTest1(unittest.TestCase): num_grad = self.get_numerical_gradient() for idx, name in enumerate(self.grad_data_field): self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape) - self.assertTrue( - np.isclose(num_grad[idx], ana_grad[idx], rtol=rtol).all(), - "num_grad (" + name + ") has diff at " + str(self.place) + - "\nExpect " + str(num_grad[idx]) + "\n" + "But Got" + - str(ana_grad[idx]) + " in class " + self.__class__.__name__) + np.testing.assert_allclose( + num_grad[idx], + ana_grad[idx], + rtol=rtol, + atol=1e-8, + err_msg='num_grad (' + name + ') has diff at ' + + str(self.place) + '\nExpect ' + str(num_grad[idx]) + '\n' + + 'But Got' + str(ana_grad[idx]) + ' in class ' + + self.__class__.__name__) def check_forward(self): pd_output = self.forward() py_output = self.py_rnn.forward() self.assertEqual(pd_output.shape, py_output.shape) - self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all()) + np.testing.assert_allclose(pd_output, py_output, rtol=0.01) def get_numerical_gradient(self, delta=0.005): dloss_dout = 1.0 diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index 8d0fcc7ae22..59ed776238e 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -882,10 +882,10 @@ class API_TestSumOp(unittest.TestCase): res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum]) - self.assertTrue( - np.allclose(res, - np.sum(input_data.astype(attr_dtype), - axis=np_axis))) + np.testing.assert_allclose(res, + np.sum(input_data.astype(attr_dtype), + axis=np_axis), + rtol=1e-05) def test_static(self): shape = [10, 10] @@ -955,7 +955,7 @@ class TestAllAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], np.all(input_np))) + np.testing.assert_allclose(fetches[0], np.all(input_np), rtol=1e-05) def test_static(self): for place in self.places: @@ -1011,7 +1011,7 @@ class TestAnyAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": input_np}, fetch_list=[result]) - self.assertTrue(np.allclose(fetches[0], np.any(input_np))) + np.testing.assert_allclose(fetches[0], np.any(input_np), rtol=1e-05) def test_static(self): for place in self.places: diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 4a48b6fb1f8..b79167f3b38 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -297,12 +297,18 @@ class TestRegularizer(unittest.TestCase): fluid.optimizer.SGD(parameter_list=linear2.parameters(), learning_rate=1e-2).minimize(loss2) # they should both be applied by l1, and keep the same - self.assertTrue( - np.allclose(linear1.weight.numpy(), linear2.weight.numpy()), - "weight should use the regularization in fluid.ParamAttr!") - self.assertTrue( - np.allclose(linear1.bias.numpy(), linear2.bias.numpy()), - "bias should use the regularization in fluid.ParamAttr!") + 
np.testing.assert_allclose( + linear1.weight.numpy(), + linear2.weight.numpy(), + rtol=1e-05, + err_msg= + 'weight should use the regularization in fluid.ParamAttr!') + np.testing.assert_allclose( + linear1.bias.numpy(), + linear2.bias.numpy(), + rtol=1e-05, + err_msg='bias should use the regularization in fluid.ParamAttr!' + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_regularizer_api.py b/python/paddle/fluid/tests/unittests/test_regularizer_api.py index fc46c9c93c3..52e91ad138d 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer_api.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer_api.py @@ -203,12 +203,18 @@ class TestRegularizer(unittest.TestCase): fluid.optimizer.SGD(parameter_list=linear2.parameters(), learning_rate=1e-2).minimize(loss2) # they should both be applied by l1, and keep the same - self.assertTrue( - np.allclose(linear1.weight.numpy(), linear2.weight.numpy()), - "weight should use the regularization in fluid.ParamAttr!") - self.assertTrue( - np.allclose(linear1.bias.numpy(), linear2.bias.numpy()), - "bias should use the regularization in fluid.ParamAttr!") + np.testing.assert_allclose( + linear1.weight.numpy(), + linear2.weight.numpy(), + rtol=1e-05, + err_msg= + 'weight should use the regularization in fluid.ParamAttr!') + np.testing.assert_allclose( + linear1.bias.numpy(), + linear2.bias.numpy(), + rtol=1e-05, + err_msg='bias should use the regularization in fluid.ParamAttr!' + ) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_renorm_op.py b/python/paddle/fluid/tests/unittests/test_renorm_op.py index d25b0e9d2e5..169edb4b164 100644 --- a/python/paddle/fluid/tests/unittests/test_renorm_op.py +++ b/python/paddle/fluid/tests/unittests/test_renorm_op.py @@ -50,7 +50,7 @@ class TestRenormAPI(unittest.TestCase): [0.60891086, 0.04392857, 0.61500001]], [[0.40594056, -1.17142856, 0.41000000], [0.62920785, 0.54178572, 0.61500001]]]) - self.assertTrue(np.allclose(expected, np.array(res))) + np.testing.assert_allclose(expected, np.array(res), rtol=1e-05) def test_dygraph_api(self): self.input_data() @@ -63,14 +63,16 @@ class TestRenormAPI(unittest.TestCase): [0.60891086, 0.04392857, 0.61500001]], [[0.40594056, -1.17142856, 0.41000000], [0.62920785, 0.54178572, 0.61500001]]]) - self.assertTrue(np.allclose(expected, np.array(y))) + np.testing.assert_allclose(expected, np.array(y), rtol=1e-05) z = paddle.mean(y) z.backward(retain_graph=True) expected_grad = np.array([[[0, 0.01394558, 0.02733333], [0, 0.01394558, 0.00683333]], [[0, 0.01045918, 0.00683333], [0, 0.01394558, 0.00683333]]]) - self.assertTrue(np.allclose(expected_grad, np.array(x.grad))) + np.testing.assert_allclose(expected_grad, + np.array(x.grad), + rtol=1e-05) # #test exception: with fluid.dygraph.guard(): input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] @@ -92,7 +94,7 @@ class TestRenormAPI(unittest.TestCase): [0.60891086, 0.04392857, 0.61500001]], [[0.40594056, -1.17142856, 0.41000000], [0.62920785, 0.54178572, 0.61500001]]]) - self.assertTrue(np.allclose(expected, np.array(y))) + np.testing.assert_allclose(expected, np.array(y), rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py index 1f6fb37e1e0..8358f9f656c 100644 --- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py @@ -19,7 +19,7 @@ import 
paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.layers.control_flow import lod_rank_table from paddle.fluid import Program, program_guard -import numpy +import numpy as np import functools @@ -80,12 +80,12 @@ class TestReorderLoDTensor(unittest.TestCase): data_lod_level = desc[2] data_lod = [] for i in range(data_lod_level): - lod_level_i = numpy.random.randint( + lod_level_i = np.random.randint( low=1, high=5, size=self.num_seq if i == 0 else sum(lod_level_i)).tolist() data_lod.append(lod_level_i) - data_value = numpy.random.random( + data_value = np.random.random( size=[sum(data_lod[-1]) if data_lod else self.num_seq] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) @@ -132,7 +132,7 @@ class TestReorderLoDTensor(unittest.TestCase): input_table = [(i, 1, []) for i in range(len(rank_table))] # reorder by rank_table - output_value = numpy.zeros_like(input_value) + output_value = np.zeros_like(input_value) output_lod = [] offset = 0 for index, length in rank_table: @@ -158,19 +158,15 @@ class TestReorderLoDTensor(unittest.TestCase): expect_output, expect_output_lod = self.reorder() for actual_output in self.actual_outputs: self.assertTrue( - numpy.allclose(numpy.array(actual_output), - expect_output, - atol=0.001)) + np.allclose(np.array(actual_output), expect_output, atol=0.001)) self.assertEqual(expect_output_lod, actual_output.recursive_sequence_lengths()) # check gradient - expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) + expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] for actual_grad in self.actual_grads: self.assertTrue( - numpy.allclose(numpy.array(actual_grad), - expect_grad, - atol=0.001)) + np.allclose(np.array(actual_grad), expect_grad, atol=0.001)) self.assertEqual(expect_grad_lod, actual_grad.recursive_sequence_lengths()) @@ -182,19 +178,15 @@ class TestReorderLoDTensor(unittest.TestCase): expect_output, expect_output_lod = self.reorder() for actual_output in self.actual_outputs: self.assertTrue( - numpy.allclose(numpy.array(actual_output), - expect_output, - atol=0.001)) + np.allclose(np.array(actual_output), expect_output, atol=0.001)) self.assertEqual(expect_output_lod, actual_output.recursive_sequence_lengths()) # check gradient - expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) + expect_grad = np.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] for actual_grad in self.actual_grads: self.assertTrue( - numpy.allclose(numpy.array(actual_grad), - expect_grad, - atol=0.001)) + np.allclose(np.array(actual_grad), expect_grad, atol=0.001)) self.assertEqual(expect_grad_lod, actual_grad.recursive_sequence_lengths()) @@ -205,14 +197,12 @@ class TestReorderLoDTensor(unittest.TestCase): input_lod) # preserve the output of LodTensor with implicit lod to compare expect_output = [ - numpy.array(actual_output) for actual_output in self.actual_outputs + np.array(actual_output) for actual_output in self.actual_outputs ] self.run_program() for actual_output in self.actual_outputs: self.assertTrue( - numpy.allclose(numpy.array(actual_output), - expect_output, - atol=0.001)) + np.allclose(np.array(actual_output), expect_output, atol=0.001)) class TestReorderLoDTensorError(unittest.TestCase): @@ -222,9 +212,8 @@ class TestReorderLoDTensorError(unittest.TestCase): def test_Variable(): # The input must be Variable. 
- x1 = numpy.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") - table1 = numpy.array([0.9383, 0.1983, 3.2, - 1.2]).astype("float64") + x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") + table1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64") new_dat = fluid.layers.reorder_lod_tensor_by_rank( x=x1, rank_table=table1) diff --git a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py index 28e49c7d16d..5dee9d75cb9 100644 --- a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py +++ b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py @@ -131,7 +131,7 @@ class TestIndexSelectAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.repeat(self.data_x, self.data_index, axis=1) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: repeats = np.array([1, 2, 1]).astype('int32') @@ -150,7 +150,7 @@ class TestIndexSelectAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.repeat(self.data_x, repeats, axis=0) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) repeats = 2 with program_guard(Program(), Program()): @@ -161,7 +161,7 @@ class TestIndexSelectAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.repeat(self.data_x, repeats, axis=0) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): self.input_data() @@ -175,7 +175,7 @@ class TestIndexSelectAPI(unittest.TestCase): z = paddle.repeat_interleave(x, index, None) np_z = z.numpy() expect_out = np.repeat(input_x, index_x, axis=None) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case repeats int with fluid.dygraph.guard(): @@ -184,7 +184,7 @@ class TestIndexSelectAPI(unittest.TestCase): z = paddle.repeat_interleave(x, index, None) np_z = z.numpy() expect_out = np.repeat(input_x, index, axis=None) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 1: with fluid.dygraph.guard(): @@ -193,7 +193,7 @@ class TestIndexSelectAPI(unittest.TestCase): z = paddle.repeat_interleave(x, index, -1) np_z = z.numpy() expect_out = np.repeat(self.data_x, self.data_index, axis=-1) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(self.data_x) @@ -201,7 +201,7 @@ class TestIndexSelectAPI(unittest.TestCase): z = paddle.repeat_interleave(x, index, 1) np_z = z.numpy() expect_out = np.repeat(self.data_x, self.data_index, axis=1) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: index_x = np.array([1, 2, 1]).astype('int32') @@ -211,7 +211,7 @@ class TestIndexSelectAPI(unittest.TestCase): z = paddle.repeat_interleave(x, index, axis=0) np_z = z.numpy() expect_out = np.repeat(self.data_x, index, axis=0) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index d4d89177653..6fe9392a2b6 100755 --- 
a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -488,7 +488,7 @@ class TestDygraphReshapeAPI(unittest.TestCase): output = self.reshape(x=input, shape=[5, 10]) out_np = output.numpy() expected_out = np.reshape(input_1, newshape=[5, 10]) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_out_uint8(self): paddle.disable_static() @@ -497,7 +497,7 @@ class TestDygraphReshapeAPI(unittest.TestCase): output = self.reshape(x=input, shape=[5, 10]) out_np = output.numpy() expected_out = np.reshape(input_1, newshape=[5, 10]) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_out_float32(self): paddle.disable_static() @@ -506,7 +506,7 @@ class TestDygraphReshapeAPI(unittest.TestCase): output = self.reshape(x=input, shape=[5, 10]) out_np = output.numpy() expected_out = np.reshape(input_1, newshape=[5, 10]) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) class TestDygraphReshapeInplaceAPI(TestDygraphReshapeAPI): diff --git a/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py b/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py index 4aebad4e87c..bf39f9fa2f9 100644 --- a/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py +++ b/python/paddle/fluid/tests/unittests/test_resnet50_with_cinn.py @@ -112,7 +112,7 @@ class TestResnet50Accuracy(unittest.TestCase): print(loss_c) print("Losses of Paddle") print(loss_p) - self.assertTrue(np.allclose(loss_c, loss_p, atol=1e-5)) + np.testing.assert_allclose(loss_c, loss_p, rtol=1e-05, atol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py index eb192fcde6f..ab1eef96657 100644 --- a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py +++ b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py @@ -127,10 +127,13 @@ class TestBase(unittest.TestCase): self.mean_grad_tensor.set(self.mean_grad, place) def check(self, actual_t, expect_t, place, out_name, atol=1e-5): - self.assertTrue( - np.allclose(actual_t, expect_t, atol=atol), - "Output (" + out_name + ") has diff at " + str(place) + - "\nExpect " + str(expect_t) + "\n" + "But Got" + str(actual_t)) + np.testing.assert_allclose( + actual_t, + expect_t, + rtol=1e-05, + atol=atol, + err_msg='Output (' + out_name + ') has diff at ' + str(place) + + '\nExpect ' + str(expect_t) + '\n' + 'But Got' + str(actual_t)) class TestRmspropOp(TestBase): diff --git a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py index 527b6c5e2d8..4f56edfba9f 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py @@ -163,7 +163,7 @@ class TestLSTMCell(unittest.TestCase): }, fetch_list=[lstm_hidden_new, lstm_hidden]) - self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0)) + np.testing.assert_allclose(out[0], out[1], rtol=0.0001, atol=0) class TestGRUCellError(unittest.TestCase): @@ -277,7 +277,7 @@ class TestGRUCell(unittest.TestCase): }, fetch_list=[gru_hidden_new, gru_hidden]) - self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0)) + np.testing.assert_allclose(out[0], out[1], rtol=0.0001, atol=0) class TestRnnError(unittest.TestCase): @@ -437,7 +437,7 @@ class 
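
The rmsprop `check()` helper above shows where a hand-rolled failure message goes: the second argument of `assertTrue` becomes `err_msg`, which `assert_allclose` appends to its own mismatch report, so the context string survives the migration and the numeric diff summary comes for free. A trimmed stand-alone version of that helper (the names in the call at the bottom are made up purely to exercise it):

import numpy as np

def check(actual_t, expect_t, place, out_name, atol=1e-05):
    # err_msg is tacked onto the end of assert_allclose's own report.
    np.testing.assert_allclose(
        actual_t, expect_t, rtol=1e-05, atol=atol,
        err_msg='Output (' + out_name + ') has diff at ' + str(place))

check(np.zeros(4), np.zeros(4), 'CPUPlace', 'MomentOut')  # passes silently
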
TestRnn(unittest.TestCase): }, fetch_list=[output_new, rnn_out]) - self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4)) + np.testing.assert_allclose(out[0], out[1], rtol=0.0001) class TestRnnUtil(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py index f53df455239..1e49ca732e1 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -683,11 +683,13 @@ class ModuleApiTest(unittest.TestCase): stgraph_output = self._calc_output(place, mode, dygraph=False) expect_output = getattr(self, "outputs", None) for actual_t, expect_t in zip(dygraph_output, stgraph_output): - self.assertTrue(np.allclose(actual_t, expect_t, rtol=1e-5, atol=0)) + np.testing.assert_allclose(actual_t, expect_t, rtol=1e-05, atol=0) if expect_output: for actual_t, expect_t in zip(dygraph_output, expect_output): - self.assertTrue( - np.allclose(actual_t, expect_t, rtol=1e-5, atol=0)) + np.testing.assert_allclose(actual_t, + expect_t, + rtol=1e-05, + atol=0) def check_output(self): devices = ["CPU", "GPU"] if fluid.is_compiled_with_cuda() else ["CPU"] diff --git a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py index f5ce0306091..870fc1cdf35 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_memory_helper_op.py @@ -48,7 +48,7 @@ class RNNMemoryHelperOpTest(unittest.TestCase): out = exe.run(self.program, feed=self.feed_map, fetch_list=self.fetch_list) - self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5)) + np.testing.assert_allclose(out[0], x_np, rtol=1e-05) class RNNMemoryHelperGradOpTest(unittest.TestCase): @@ -134,10 +134,9 @@ class RNNMemoryHelperGradOpWithoutInputTest(unittest.TestCase): out = exe.run(self.program, feed=self.feed_map, fetch_list=self.fetch_list) - self.assertTrue( - np.allclose(out[0], - np.zeros(shape=(2, 3)).astype("float32"), - rtol=1e-5)) + np.testing.assert_allclose(out[0], + np.zeros(shape=(2, 3)).astype('float32'), + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_roll_op.py b/python/paddle/fluid/tests/unittests/test_roll_op.py index 546c278b8fa..e7b44d8a3d1 100644 --- a/python/paddle/fluid/tests/unittests/test_roll_op.py +++ b/python/paddle/fluid/tests/unittests/test_roll_op.py @@ -78,7 +78,7 @@ class TestRollAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) # case 2: with program_guard(Program(), Program()): @@ -90,7 +90,7 @@ class TestRollAPI(unittest.TestCase): return_numpy=False) expect_out = np.array([[7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_dygraph_api(self): self.input_data() @@ -101,7 +101,7 @@ class TestRollAPI(unittest.TestCase): np_z = z.numpy() expect_out = np.array([[9.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: with fluid.dygraph.guard(): @@ -110,7 +110,7 @@ class TestRollAPI(unittest.TestCase): np_z = z.numpy() expect_out = np.array([[7.0, 
8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) - self.assertTrue(np.allclose(expect_out, np_z)) + np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) def test_roll_op_false(self): self.input_data() @@ -134,7 +134,7 @@ class TestRollAPI(unittest.TestCase): axes = [0, 1] out = paddle.roll(x, shifts=shifts, axis=axes).numpy() expected_out = np.array([[8, 6, 7], [2, 0, 1], [5, 3, 4]]) - self.assertTrue(np.allclose(out, expected_out)) + np.testing.assert_allclose(out, expected_out, rtol=1e-05) def test_shifts_as_tensor_static(self): with program_guard(Program(), Program()): @@ -147,12 +147,12 @@ class TestRollAPI(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) [out_np] = exe.run(fetch_list=[out]) - self.assertTrue(np.allclose(out_np, expected_out)) + np.testing.assert_allclose(out_np, expected_out, rtol=1e-05) if paddle.is_compiled_with_cuda(): exe = fluid.Executor(fluid.CPUPlace()) [out_np] = exe.run(fetch_list=[out]) - self.assertTrue(np.allclose(out_np, expected_out)) + np.testing.assert_allclose(out_np, expected_out, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py index 00deabbf72e..e1b4549cf07 100644 --- a/python/paddle/fluid/tests/unittests/test_run_program_op.py +++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py @@ -114,7 +114,10 @@ class RunProgramOpTest(unittest.TestCase): # Step 2. compare output for expect_v, actual_v in six.moves.zip(self.expect_outs, actual_outs): - self.assertTrue(np.allclose(expect_v, actual_v.numpy(), atol=1e-5)) + np.testing.assert_allclose(expect_v, + actual_v.numpy(), + rtol=1e-05, + atol=1e-05) def check_grad_with_place(self, place): # Step 1. calc grads @@ -124,7 +127,10 @@ class RunProgramOpTest(unittest.TestCase): for expect_v, actual_v in six.moves.zip(self.expect_grads, actual_grads): np.testing.assert_array_almost_equal(expect_v, actual_v) - self.assertTrue(np.allclose(expect_v, actual_v, atol=1e-5)) + np.testing.assert_allclose(expect_v, + actual_v, + rtol=1e-05, + atol=1e-05) def prepare_dygraph_input(self, place, return_param_list=False): diff --git a/python/paddle/fluid/tests/unittests/test_segment_ops.py b/python/paddle/fluid/tests/unittests/test_segment_ops.py index 678a888eeda..d1a9aa6ae2f 100644 --- a/python/paddle/fluid/tests/unittests/test_segment_ops.py +++ b/python/paddle/fluid/tests/unittests/test_segment_ops.py @@ -252,9 +252,7 @@ class API_SegmentOpsTest(unittest.TestCase): fetch_list=[res_sum, res_mean, res_max, res_min]) for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res, atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, ret_res, rtol=1e-05, atol=1e-06) def test_dygraph(self): device = paddle.CPUPlace() @@ -275,9 +273,10 @@ class API_SegmentOpsTest(unittest.TestCase): ret = [res_sum, res_mean, res_max, res_min] for np_res, ret_res in zip([np_sum, np_mean, np_max, np_min], ret): - self.assertTrue( - np.allclose(np_res, ret_res.numpy(), atol=1e-6), "two value is\ - {}\n{}, check diff!".format(np_res, ret_res)) + np.testing.assert_allclose(np_res, + ret_res.numpy(), + rtol=1e-05, + atol=1e-06) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py index c809c973438..33103940247 100644 --- 
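
Several hunks above (run_program, segment_ops) started from `np.allclose(..., atol=1e-05)`; that call still carried the default `rtol=1e-05`, so the rewrite spells both tolerances out rather than tightening anything. The underlying criterion is `|actual - desired| <= atol + rtol * |desired|`, as this illustrative check shows:

import numpy as np

desired = np.array([0.0, 100.0])
# 9e-06 is covered by atol at the zero entry; 9e-04 is covered by
# rtol * 100 at the large entry.
actual = desired + np.array([9e-06, 9e-04])

np.testing.assert_allclose(actual, desired, rtol=1e-05, atol=1e-05)
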
a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py +++ b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py @@ -61,8 +61,12 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase): }, fetch_list=[y.name, x.grad_name]) x_grad = np.asarray([0.5, 0.5]).astype(np.float32) - self.assertTrue(np.allclose(np.asarray(ret[0]), feed_x)) - self.assertTrue(np.allclose(np.asarray(ret[1]), x_grad)) + np.testing.assert_allclose(np.asarray(ret[0]), + feed_x, + rtol=1e-05) + np.testing.assert_allclose(np.asarray(ret[1]), + x_grad, + rtol=1e-05) class TestSelectInputOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py index 6807f96109e..46553abb38c 100644 --- a/python/paddle/fluid/tests/unittests/test_selu_op.py +++ b/python/paddle/fluid/tests/unittests/test_selu_op.py @@ -103,7 +103,7 @@ class TestSeluAPI(unittest.TestCase): res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) out_ref = ref_selu(self.x_np, self.scale, self.alpha) for r in res: - self.assertEqual(np.allclose(out_ref, r), True) + np.testing.assert_allclose(out_ref, r, rtol=1e-05) def test_dygraph_api(self): paddle.disable_static(self.place) @@ -113,7 +113,7 @@ class TestSeluAPI(unittest.TestCase): out2 = selu(x) out_ref = ref_selu(self.x_np, self.scale, self.alpha) for r in [out1, out2]: - self.assertEqual(np.allclose(out_ref, r.numpy()), True) + np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) paddle.enable_static() def test_fluid_api(self): @@ -123,7 +123,7 @@ class TestSeluAPI(unittest.TestCase): exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_selu(self.x_np, self.scale, self.alpha) - self.assertEqual(np.allclose(out_ref, res[0]), True) + np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) def test_errors(self): with paddle.static.program_guard(paddle.static.Program()): diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index 8f4f5dad074..c22fc6476d9 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -391,23 +391,24 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): "Test dygraph mode" output1_dy, params1_dy = self.dygraph_sgd_mp(mp=True) output2_dy, params2_dy = self.dygraph_sgd_mp(mp=False) - self.assertEqual( - np.allclose(output1_dy.astype('float32').numpy(), - output2_dy.astype('float32').numpy(), - atol=1e-01), True) + np.testing.assert_allclose(output1_dy.astype('float32').numpy(), + output2_dy.astype('float32').numpy(), + rtol=1e-05, + atol=0.1) for idx in range(len(params1_dy)): - self.assertEqual( - np.allclose(params1_dy[idx].astype('float32').numpy(), - params2_dy[idx].astype('float32').numpy(), - atol=1e-01), True) + np.testing.assert_allclose( + params1_dy[idx].astype('float32').numpy(), + params2_dy[idx].astype('float32').numpy(), + rtol=1e-05, + atol=0.1) "Test static mode" output1_st = self.static_sgd_mp(mp=True) output2_st = self.static_sgd_mp(mp=False) for idx in range(len(output1_st)): - self.assertEqual( - np.allclose(output1_st[idx].astype('float32'), - output2_st[idx].astype('float32'), - atol=1e-01), True) + np.testing.assert_allclose(output1_st[idx].astype('float32'), + output2_st[idx].astype('float32'), + rtol=1e-05, + atol=0.1) class TestSGDMultiPrecision1_0(unittest.TestCase): @@ -493,23 +494,24 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): "Test dygraph mode" 
output1_dy, params1_dy = self.dygraph_sgd_mp(mp=True) output2_dy, params2_dy = self.dygraph_sgd_mp(mp=False) - self.assertEqual( - np.allclose(output1_dy.astype('float32').numpy(), - output2_dy.astype('float32').numpy(), - atol=1e-01), True) + np.testing.assert_allclose(output1_dy.astype('float32').numpy(), + output2_dy.astype('float32').numpy(), + rtol=1e-05, + atol=0.1) for idx in range(len(params1_dy)): - self.assertEqual( - np.allclose(params1_dy[idx].astype('float32').numpy(), - params2_dy[idx].astype('float32').numpy(), - atol=1e-01), True) + np.testing.assert_allclose( + params1_dy[idx].astype('float32').numpy(), + params2_dy[idx].astype('float32').numpy(), + rtol=1e-05, + atol=0.1) "Test static mode" output1_st = self.static_sgd_mp(mp=True) output2_st = self.static_sgd_mp(mp=False) for idx in range(len(output1_st)): - self.assertEqual( - np.allclose(output1_st[idx].astype('float32'), - output2_st[idx].astype('float32'), - atol=1e-01), True) + np.testing.assert_allclose(output1_st[idx].astype('float32'), + output2_st[idx].astype('float32'), + rtol=1e-05, + atol=0.1) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_share_data_op.py b/python/paddle/fluid/tests/unittests/test_share_data_op.py index a049661eaab..56ac3af6d85 100644 --- a/python/paddle/fluid/tests/unittests/test_share_data_op.py +++ b/python/paddle/fluid/tests/unittests/test_share_data_op.py @@ -50,7 +50,7 @@ class TestShareDataOpOnDifferentPlaces(unittest.TestCase): op = Operator("share_data", X="X", Out="Out") op.run(scope, place) - self.assertTrue(np.allclose(np_array, out)) + np.testing.assert_allclose(np_array, out, rtol=1e-05) def check_with_selected_rows(self, place): scope = core.Scope() @@ -75,7 +75,7 @@ class TestShareDataOpOnDifferentPlaces(unittest.TestCase): out_height = out.height() out_rows = out.rows() - self.assertTrue(np.allclose(np_array, out_tensor)) + np.testing.assert_allclose(np_array, out_tensor, rtol=1e-05) self.assertEqual(x_height, out_height) self.assertEqual(x_rows, out_rows) diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py index c3cb57f9438..db43649c361 100644 --- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -81,9 +81,9 @@ class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase): }, fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad], return_numpy=False) - self.assertTrue(np.allclose(tensor_np[0:6], outs[0])) - self.assertTrue(np.allclose(tensor_np[0:5], outs[1])) - self.assertTrue(np.allclose(tensor_np[0:2], outs[2])) + np.testing.assert_allclose(tensor_np[0:6], outs[0], rtol=1e-05) + np.testing.assert_allclose(tensor_np[0:5], outs[1], rtol=1e-05) + np.testing.assert_allclose(tensor_np[0:2], outs[2], rtol=1e-05) self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01) @@ -108,9 +108,9 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase): }, fetch_list=[self.mem1, self.mem2, self.mem3, self.x_grad], return_numpy=False) - self.assertTrue(np.allclose(tensor_np[0:3], outs[0])) - self.assertTrue(np.allclose(tensor_np[0:2], outs[1])) - self.assertTrue(np.allclose(tensor_np[0:1], outs[2])) + np.testing.assert_allclose(tensor_np[0:3], outs[0], rtol=1e-05) + np.testing.assert_allclose(tensor_np[0:2], outs[1], rtol=1e-05) + np.testing.assert_allclose(tensor_np[0:1], outs[2], rtol=1e-05) self.assertAlmostEqual(1.0, self.sum_lodtensor(outs[3]), delta=0.01) diff --git 
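
In the SGD multi-precision tests just above, an fp16 run is compared against an fp32 run, so the loose absolute tolerance is kept and only the literal changes spelling (`1e-01` -> `0.1`, elsewhere `1.e-4` -> `0.0001`). A numpy-only sketch of why `atol=0.1` comfortably absorbs fp16 rounding (the data here is random stand-in output, not the test's real tensors):

import numpy as np

rng = np.random.RandomState(0)
fp32_out = rng.rand(8).astype('float32')
# Round-tripping through float16 simulates the precision loss of an
# fp16 training run; the error is on the order of 5e-04 for values in [0, 1).
fp16_out = fp32_out.astype('float16').astype('float32')

np.testing.assert_allclose(fp16_out, fp32_out, rtol=1e-05, atol=0.1)
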
a/python/paddle/fluid/tests/unittests/test_signal.py b/python/paddle/fluid/tests/unittests/test_signal.py index 670b3aa40df..22e99ab49c2 100644 --- a/python/paddle/fluid/tests/unittests/test_signal.py +++ b/python/paddle/fluid/tests/unittests/test_signal.py @@ -656,16 +656,7 @@ def to_safe_name(s): ]) class TestFrame(unittest.TestCase): def test_frame(self): - self.assertTrue( - np.allclose( - frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), - paddle.signal.frame( - paddle.to_tensor(self.x), - self.frame_length, - self.hop_length, - self.axis), - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length, self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) @@ -695,12 +686,7 @@ class TestFrameStatic(unittest.TestCase): [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output]) paddle.disable_static() - self.assertTrue( - np.allclose( - frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), - output, - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) @@ -735,15 +721,7 @@ class TestFrameException(unittest.TestCase): ]) class TestOverlapAdd(unittest.TestCase): def test_overlap_add(self): - self.assertTrue( - np.allclose( - overlap_add_for_api_test(self.x, self.hop_length, self.axis), - paddle.signal.overlap_add( - paddle.to_tensor(self.x), - self.hop_length, - self.axis), - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) @@ -772,12 +750,7 @@ class TestOverlapAddStatic(unittest.TestCase): [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output]) paddle.disable_static() - self.assertTrue( - np.allclose( - overlap_add_for_api_test(self.x, self.hop_length, self.axis), - output, - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) @@ -852,21 +825,7 @@ class TestStft(unittest.TestCase): win_p = paddle.to_tensor(self.window) win_l = self.window - self.assertTrue( - np.allclose( - stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, self.center, self.pad_mode), - paddle.signal.stft( - paddle.to_tensor(self.x), - self.n_fft, - self.hop_length, - self.win_length, - win_p, - self.center, - self.pad_mode, - self.normalized, - self.onesided), - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, self.center, self.pad_mode), paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.pad_mode, self.normalized, self.onesided), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) @@ -938,22 +897,7 @@ class 
TestIstft(unittest.TestCase): win_p = paddle.to_tensor(self.window) win_l = self.window - self.assertTrue( - np.allclose( - istft(self.x, self.hop_length, self.win_length, win_l, self.center, self.length), - paddle.signal.istft( - paddle.to_tensor(self.x), - self.n_fft, - self.hop_length, - self.win_length, - win_p, - self.center, - self.normalized, - self.onesided, - self.length, - self.return_complex), - rtol=rtol.get(str(self.x.dtype)), - atol=atol.get(str(self.x.dtype)))) + np.testing.assert_allclose(istft(self.x, self.hop_length, self.win_length, win_l, self.center, self.length), paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.normalized, self.onesided, self.length, self.return_complex), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype))) @place(DEVICES) diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index ddf0af21cdd..d660518f04e 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -641,7 +641,9 @@ class TestSliceApiEager(unittest.TestCase): grad_truth[-3:3, 0:2, 2:4] = 1 np.testing.assert_array_equal(grad_truth, a.gradient()) - self.assertTrue(np.allclose(a_1.numpy(), a[-3:3, 0:2, 2:4])) + np.testing.assert_allclose(a_1.numpy(), + a[-3:3, 0:2, 2:4], + rtol=1e-05) class TestSliceApiWithLoDTensorArray(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py index 3aa1cafd92f..106e42599f9 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py @@ -107,7 +107,7 @@ class TestDropoutBiasFuseOp3(unittest.TestCase): "mask": mask_in_np }, fetch_list=[rst]) - self.assertTrue(np.allclose(fetches[0], rst_np)) + np.testing.assert_allclose(fetches[0], rst_np, rtol=1e-05) def test_dygraph(self): with fluid.dygraph.guard(fluid.CUDAPlace(0)): @@ -119,7 +119,7 @@ class TestDropoutBiasFuseOp3(unittest.TestCase): input_mask = fluid.dygraph.to_variable(mask_in_np) rst = incubate.softmax_mask_fuse(input_x, input_mask) - self.assertTrue(np.allclose(rst, rst_np)) + np.testing.assert_allclose(rst, rst_np, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py index 53128e51298..567fafe2d7d 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py @@ -103,7 +103,7 @@ class TestDropoutBiasFuseOp2(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"x": x_in_np}, fetch_list=[rst]) - self.assertTrue(np.allclose(fetches[0], rst_np)) + np.testing.assert_allclose(fetches[0], rst_np, rtol=1e-05) def test_dygraph(self): for dtype in self.dtypes: @@ -113,7 +113,7 @@ class TestDropoutBiasFuseOp2(unittest.TestCase): input_x = fluid.dygraph.to_variable(x_in_np) rst = incubate.softmax_mask_fuse_upper_triangle(input_x) - self.assertTrue(np.allclose(rst, rst_np)) + np.testing.assert_allclose(rst, rst_np, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 8618e046893..49c43c0b2bc 
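
The `test_signal.py` hunks above keep their per-dtype tolerance lookup, `rtol.get(str(self.x.dtype))`, and only swap the assertion call (the autofix also joined each of those calls onto one line). A sketch of the lookup pattern, where the dict values are illustrative and not the file's actual numbers:

import numpy as np

# Tolerances keyed by dtype name: float32 results get more slack
# than float64 results.
rtol = {'float32': 1e-05, 'float64': 1e-07}
atol = {'float32': 1e-08, 'float64': 1e-10}

x = np.linspace(0, 1, 8, dtype='float32')
y = x + 1e-09  # tiny perturbation, well inside the float32 budget

np.testing.assert_allclose(y, x,
                           rtol=rtol.get(str(x.dtype)),
                           atol=atol.get(str(x.dtype)))
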
100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -390,7 +390,7 @@ class TestSoftmaxAPI(unittest.TestCase): res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) out_ref = ref_softmax(self.x_np, axis=-1, dtype=None) for r in res: - self.assertEqual(np.allclose(out_ref, r), True) + np.testing.assert_allclose(out_ref, r, rtol=1e-05) def test_dygraph_check(self): paddle.disable_static(self.place) @@ -402,7 +402,7 @@ class TestSoftmaxAPI(unittest.TestCase): out2 = m(x) out_ref = ref_softmax(self.x_np, axis=-1, dtype=None) for r in [out1, out2]: - self.assertEqual(np.allclose(out_ref, r.numpy()), True) + np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) out1 = self.softmax(x, axis=0) x = paddle.to_tensor(self.x_np) @@ -410,7 +410,7 @@ class TestSoftmaxAPI(unittest.TestCase): out2 = m(x) out_ref = ref_softmax(self.x_np, axis=0, dtype=None) for r in [out1, out2]: - self.assertEqual(np.allclose(out_ref, r.numpy()), True) + np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) # explicilty use float32 for ROCm, as MIOpen does not yet support float64 if core.is_compiled_with_rocm(): @@ -419,7 +419,7 @@ class TestSoftmaxAPI(unittest.TestCase): else: out = self.softmax(x, dtype=np.float64) out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64) - self.assertEqual(np.allclose(out_ref, out.numpy()), True) + np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_solve_op.py b/python/paddle/fluid/tests/unittests/test_solve_op.py index 3162ecffbae..52a83b6df44 100644 --- a/python/paddle/fluid/tests/unittests/test_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_solve_op.py @@ -349,9 +349,9 @@ class TestSolveOpAPI_1(unittest.TestCase): "input_y": np_input_y }, fetch_list=[paddle_result]) - self.assertTrue( - np.allclose(fetches[0], np.linalg.solve(np_input_x, - np_input_y))) + np.testing.assert_allclose(fetches[0], + np.linalg.solve(np_input_x, np_input_y), + rtol=1e-05) def test_static(self): for place in self.place: @@ -370,8 +370,9 @@ class TestSolveOpAPI_1(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=1e-05) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -417,9 +418,9 @@ class TestSolveOpAPI_2(unittest.TestCase): "input_y": np_input_y }, fetch_list=[paddle_result]) - self.assertTrue( - np.allclose(fetches[0], np.linalg.solve(np_input_x, - np_input_y))) + np.testing.assert_allclose(fetches[0], + np.linalg.solve(np_input_x, np_input_y), + rtol=1e-05) def test_static(self): for place in self.place: @@ -437,8 +438,9 @@ class TestSolveOpAPI_2(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=1e-05) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -484,10 +486,9 @@ class TestSolveOpAPI_3(unittest.TestCase): "input_y": np_input_y }, fetch_list=[paddle_result]) - self.assertTrue( - np.allclose(fetches[0], - np.linalg.solve(np_input_x, 
np_input_y), - rtol=1.e-4)) + np.testing.assert_allclose(fetches[0], + np.linalg.solve(np_input_x, np_input_y), + rtol=0.0001) def test_static(self): for place in self.place: @@ -506,9 +507,9 @@ class TestSolveOpAPI_3(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - self.assertEqual( - np.allclose(numpy_output, paddle_output.numpy(), rtol=1.e-4), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=0.0001) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() @@ -553,9 +554,9 @@ class TestSolveOpAPI_4(unittest.TestCase): "input_y": np_input_y }, fetch_list=[paddle_result]) - self.assertTrue( - np.allclose(fetches[0], np.linalg.solve(np_input_x, - np_input_y))) + np.testing.assert_allclose(fetches[0], + np.linalg.solve(np_input_x, np_input_y), + rtol=1e-05) def test_static(self): for place in self.place: @@ -574,8 +575,9 @@ class TestSolveOpAPI_4(unittest.TestCase): numpy_output = np.linalg.solve(input_x_np, input_y_np) paddle_output = paddle.linalg.solve(tensor_input_x, tensor_input_y) - self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=1e-05) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py b/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py index 458ee25e410..ffd81640d70 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_addmm_op.py @@ -66,17 +66,21 @@ class TestAddmm(unittest.TestCase): sp_y.stop_gradient = False sp_out = paddle.incubate.sparse.addmm(sp_input, sp_x, sp_y, 3.0, 2.0) - self.assertTrue(np.allclose(sp_out.numpy(), dense_out.numpy())) + np.testing.assert_allclose(sp_out.numpy(), + dense_out.numpy(), + rtol=1e-05) if get_cuda_version() >= 11030: dense_out.backward() sp_out.backward() - self.assertTrue( - np.allclose(sp_input.grad.numpy(), dense_input.grad.numpy())) - self.assertTrue( - np.allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy())) - self.assertTrue(np.allclose(sp_y.grad.numpy(), - dense_y.grad.numpy())) + np.testing.assert_allclose(sp_input.grad.numpy(), + dense_input.grad.numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_y.grad.numpy(), + dense_y.grad.numpy(), + rtol=1e-05) @unittest.skipIf(not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, "only support cuda>=11.0") diff --git a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py index f9e40584ee6..4337461d48d 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py @@ -396,8 +396,10 @@ class TestSparseAttentionAPI(unittest.TestCase): expected_result, __, __ = ref_batch_sparse_attention( Q_np, K_np, V_np, offset_np, columns_np) - self.assertTrue( - np.allclose(fetches_result, expected_result, atol=1e-5)) + np.testing.assert_allclose(fetches_result, + expected_result, + rtol=1e-05, + atol=1e-05) def test_dygraph(self): paddle.disable_static() @@ -453,8 +455,10 @@ class TestSparseAttentionAPI(unittest.TestCase): query, key, value, offset, columns) 
numpy_result = numpy_result.astype(self.dtype) - self.assertTrue( - np.allclose(paddle_result.numpy(), numpy_result, atol=1e-5)) + np.testing.assert_allclose(paddle_result.numpy(), + numpy_result, + rtol=1e-05, + atol=1e-05) class TestSparseAttentionAPITestFloat(TestSparseAttentionAPI): diff --git a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py index 12546ea463a..149c4cfb22b 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py @@ -69,19 +69,19 @@ class TestSparseElementWiseAPI(unittest.TestCase): expect_res = op(dense_x, dense_y) expect_res.backward(expect_res) - self.assertTrue( - np.allclose(expect_res.numpy(), - actual_res.to_dense().numpy(), - equal_nan=True)) + np.testing.assert_allclose(expect_res.numpy(), + actual_res.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) if not (op == __truediv__ and dtype in ['int32', 'int64']): - self.assertTrue( - np.allclose(dense_x.grad.numpy(), - csr_x.grad.to_dense().numpy(), - equal_nan=True)) - self.assertTrue( - np.allclose(dense_y.grad.numpy(), - csr_y.grad.to_dense().numpy(), - equal_nan=True)) + np.testing.assert_allclose(dense_x.grad.numpy(), + csr_x.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) + np.testing.assert_allclose(dense_y.grad.numpy(), + csr_y.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) def func_test_coo(self, op): for sparse_dim in range(len(self.coo_shape) - 1, len(self.coo_shape)): @@ -109,18 +109,18 @@ class TestSparseElementWiseAPI(unittest.TestCase): expect_res = op(dense_x, dense_y) expect_res.backward(expect_res) - self.assertTrue( - np.allclose(expect_res.numpy(), - actual_res.to_dense().numpy(), - equal_nan=True)) - self.assertTrue( - np.allclose(dense_x.grad.numpy(), - coo_x.grad.to_dense().numpy(), - equal_nan=True)) - self.assertTrue( - np.allclose(dense_y.grad.numpy(), - coo_y.grad.to_dense().numpy(), - equal_nan=True)) + np.testing.assert_allclose(expect_res.numpy(), + actual_res.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) + np.testing.assert_allclose(dense_x.grad.numpy(), + coo_x.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) + np.testing.assert_allclose(dense_y.grad.numpy(), + coo_y.grad.to_dense().numpy(), + rtol=1e-05, + equal_nan=True) def test_support_dtypes_csr(self): paddle.device.set_device('cpu') diff --git a/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py index 58a3c1ad201..996ba3a0114 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_fused_attention_op.py @@ -107,12 +107,18 @@ class TestSparseAttentionAPI1(unittest.TestCase): query_sp, key_sp, value_sp, sp_mask) output_sp.backward() - self.assertTrue(np.allclose(output_sp.numpy(), output.numpy())) - self.assertTrue( - np.allclose(query_sp.grad.numpy(), query.grad.numpy())) - self.assertTrue(np.allclose(key_sp.grad.numpy(), key.grad.numpy())) - self.assertTrue( - np.allclose(value_sp.grad.numpy(), value.grad.numpy())) + np.testing.assert_allclose(output_sp.numpy(), + output.numpy(), + rtol=1e-05) + np.testing.assert_allclose(query_sp.grad.numpy(), + query.grad.numpy(), + rtol=1e-05) + np.testing.assert_allclose(key_sp.grad.numpy(), + key.grad.numpy(), + rtol=1e-05) + np.testing.assert_allclose(value_sp.grad.numpy(), + value.grad.numpy(), + rtol=1e-05) 
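
One subtlety in the sparse elementwise hunks above: `equal_nan=True` is carried over explicitly. For `np.allclose` that flag changes behavior (its default is `equal_nan=False`), while `np.testing.assert_allclose` already defaults to `equal_nan=True`, so after the rewrite the argument documents intent more than it changes the check:

import numpy as np

a = np.array([1.0, np.nan])
b = np.array([1.0, np.nan])

assert not np.allclose(a, b)              # NaN != NaN under allclose defaults
assert np.allclose(a, b, equal_nan=True)  # must opt in with allclose
# assert_allclose treats matching NaNs as equal by default; the explicit
# flag in the rewritten tests just makes that visible at the call site.
np.testing.assert_allclose(a, b, rtol=1e-05, equal_nan=True)
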
class TestSparseAttentionAPI2(TestSparseAttentionAPI1): diff --git a/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py b/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py index 8986d4a7ef5..47f334b1a3f 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_matmul_op.py @@ -60,15 +60,18 @@ class TestMatmul(unittest.TestCase): sp_y.stop_gradient = False sp_out = paddle.incubate.sparse.matmul(sp_x, sp_y) - self.assertTrue(np.allclose(sp_out.numpy(), dense_out.numpy())) + np.testing.assert_allclose(sp_out.numpy(), + dense_out.numpy(), + rtol=1e-05) if get_cuda_version() >= 11030: dense_out.backward() sp_out.backward() - self.assertTrue( - np.allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy())) - self.assertTrue(np.allclose(sp_y.grad.numpy(), - dense_y.grad.numpy())) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_y.grad.numpy(), + dense_y.grad.numpy(), + rtol=1e-05) @unittest.skipIf(not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000, "only support cuda>=11.0") @@ -106,14 +109,20 @@ class TestMaskedMatmul(unittest.TestCase): mask = paddle.to_tensor(np.ones([10, 6]) * np_mask).to_sparse_csr() out = paddle.incubate.sparse.masked_matmul(x, y, mask) - self.assertTrue(np.allclose(np_out.indptr, out.crows().numpy())) - self.assertTrue(np.allclose(np_out.indices, out.cols().numpy())) - self.assertTrue(np.allclose(np_out.data, out.values().numpy())) + np.testing.assert_allclose(np_out.indptr, + out.crows().numpy(), + rtol=1e-05) + np.testing.assert_allclose(np_out.indices, + out.cols().numpy(), + rtol=1e-05) + np.testing.assert_allclose(np_out.data, + out.values().numpy(), + rtol=1e-05) out.backward() - self.assertTrue(np.allclose(out.is_sparse_csr(), True)) - self.assertTrue(np.allclose(np_x_grad, x.grad.numpy())) - self.assertTrue(np.allclose(np_y_grad, y.grad.numpy())) + np.testing.assert_allclose(out.is_sparse_csr(), True, rtol=1e-05) + np.testing.assert_allclose(np_x_grad, x.grad.numpy(), rtol=1e-05) + np.testing.assert_allclose(np_y_grad, y.grad.numpy(), rtol=1e-05) @unittest.skipIf(not paddle.is_compiled_with_cuda() or get_cuda_version() < 11070, @@ -139,11 +148,15 @@ class TestMaskedMatmul(unittest.TestCase): sp_out = paddle.incubate.sparse.matmul(sp_x, sp_y) sp_out.backward() - self.assertTrue(np.allclose(sp_out.numpy(), dense_out.numpy())) - self.assertTrue( - np.allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy())) - self.assertTrue(np.allclose(sp_y.grad.numpy(), dense_y.grad.numpy())) + np.testing.assert_allclose(sp_out.numpy(), + dense_out.numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_y.grad.numpy(), + dense_y.grad.numpy(), + rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py b/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py index 9ac4fff8500..1631a2a7a59 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_mv_op.py @@ -64,12 +64,15 @@ class TestCsrMv(unittest.TestCase): sp_out = paddle.incubate.sparse.mv(sp_x, sp_vec) sp_out.backward() - self.assertTrue(np.allclose(sp_out.numpy(), dense_out.numpy())) - self.assertTrue( - np.allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy())) - 
self.assertTrue( - np.allclose(sp_vec.grad.numpy(), dense_vec.grad.numpy())) + np.testing.assert_allclose(sp_out.numpy(), + dense_out.numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_vec.grad.numpy(), + dense_vec.grad.numpy(), + rtol=1e-05) @unittest.skipIf( @@ -99,12 +102,15 @@ class TestCooMv(unittest.TestCase): sp_out = paddle.incubate.sparse.mv(sp_x, sp_vec) sp_out.backward() - self.assertTrue(np.allclose(sp_out.numpy(), dense_out.numpy())) - self.assertTrue( - np.allclose(sp_x.grad.to_dense().numpy(), - (dense_x.grad * mask).numpy())) - self.assertTrue( - np.allclose(sp_vec.grad.numpy(), dense_vec.grad.numpy())) + np.testing.assert_allclose(sp_out.numpy(), + dense_out.numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + (dense_x.grad * mask).numpy(), + rtol=1e-05) + np.testing.assert_allclose(sp_vec.grad.numpy(), + dense_vec.grad.numpy(), + rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py b/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py index b1026b080ca..76751e0f87e 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_softmax_op.py @@ -50,9 +50,13 @@ class TestCsrSoftmax(unittest.TestCase): csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr() m = paddle.incubate.sparse.nn.Softmax() out = m(csr) - self.assertTrue(np.allclose(out.crows().numpy(), np_csr.indptr)) - self.assertTrue(np.allclose(out.cols().numpy(), np_csr.indices)) - self.assertTrue(np.allclose(out.values().numpy(), np_out)) + np.testing.assert_allclose(out.crows().numpy(), + np_csr.indptr, + rtol=1e-05) + np.testing.assert_allclose(out.cols().numpy(), + np_csr.indices, + rtol=1e-05) + np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05) # dx = (dout - sum(dout * out)) * out, dout=rand_x out.backward(csr.detach()) @@ -67,11 +71,15 @@ class TestCsrSoftmax(unittest.TestCase): sum = np.sum(dout * out, keepdims=True) dx = np.concatenate([dx, (dout - sum) * out]) - self.assertTrue(np.allclose(csr.grad.crows().numpy(), - np_csr.indptr)) - self.assertTrue(np.allclose(csr.grad.cols().numpy(), - np_csr.indices)) - self.assertTrue(np.allclose(csr.grad.values().numpy(), dx)) + np.testing.assert_allclose(csr.grad.crows().numpy(), + np_csr.indptr, + rtol=1e-05) + np.testing.assert_allclose(csr.grad.cols().numpy(), + np_csr.indices, + rtol=1e-05) + np.testing.assert_allclose(csr.grad.values().numpy(), + dx, + rtol=1e-05) def test_softmax3d(self): with _test_eager_guard(): @@ -99,7 +107,7 @@ class TestCsrSoftmax(unittest.TestCase): csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr() m = paddle.incubate.sparse.nn.Softmax() out = m(csr) - self.assertTrue(np.allclose(out.values().numpy(), np_out)) + np.testing.assert_allclose(out.values().numpy(), np_out, rtol=1e-05) # dx = (dout - sum(dout * out)) * out, dout=rand_x out.backward(csr.detach()) @@ -120,7 +128,9 @@ class TestCsrSoftmax(unittest.TestCase): batch_offset += np_csr.nnz - self.assertTrue(np.allclose(csr.grad.values().numpy(), dx)) + np.testing.assert_allclose(csr.grad.values().numpy(), + dx, + rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py b/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py index 8ac4d777317..7abbaa84adb 100644 --- 
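
The sparse matmul/mv/softmax tests above never hand a sparse tensor straight to the assertion; they densify first (`.to_dense().numpy()`, `.values().numpy()`) and compare gradients only under the sparsity mask (`dense_x.grad * mask`). A numpy-only sketch of that comparison, with `mask` and `dense_grad` as stand-ins for the test's Paddle tensors:

import numpy as np

mask = np.array([[1.0, 0.0],
                 [0.0, 1.0]])
dense_grad = np.array([[0.5, 0.7],
                       [0.2, 0.9]])
# What sp_x.grad.to_dense() would hold: the dense gradient restricted
# to the positions that exist in the sparse tensor.
sparse_grad_densified = dense_grad * mask

np.testing.assert_allclose(sparse_grad_densified, dense_grad * mask,
                           rtol=1e-05)
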
a/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_unary_op.py @@ -63,15 +63,18 @@ class TestSparseUnary(unittest.TestCase): dense_out.backward() # compare forward - self.assertTrue( - np.allclose(sp_out.to_dense().numpy(), dense_out.numpy())) + np.testing.assert_allclose(sp_out.to_dense().numpy(), + dense_out.numpy(), + rtol=1e-05) # compare backward if dense_func == paddle.sqrt: expect_grad = np.nan_to_num(dense_x.grad.numpy(), 0., 0., 0.) else: expect_grad = (dense_x.grad * mask).numpy() - self.assertTrue(np.allclose(sp_x.grad.to_dense().numpy(), expect_grad)) + np.testing.assert_allclose(sp_x.grad.to_dense().numpy(), + expect_grad, + rtol=1e-05) def compare_with_dense(self, dense_func, sparse_func): self.check_result(dense_func, sparse_func, 'coo') diff --git a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py index e027401549a..e8c1dbd7afb 100644 --- a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py @@ -168,7 +168,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): self.check_tensor_same(var_out, expect_out) def check_tensor_same(self, actual, expect): - self.assertTrue(np.allclose(np.array(actual), np.array(expect))) + np.testing.assert_allclose(np.array(actual), + np.array(expect), + rtol=1e-05) self.assertEqual(actual.recursive_sequence_lengths(), expect.recursive_sequence_lengths()) diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 2c71ada5612..b9e92913c11 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -363,9 +363,9 @@ class API_TestSplit(unittest.TestCase): }, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) - self.assertTrue(np.allclose(ex_x2, r2)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) + np.testing.assert_allclose(ex_x2, r2, rtol=1e-05) class API_TestSplit2(unittest.TestCase): @@ -380,9 +380,9 @@ class API_TestSplit2(unittest.TestCase): r0, r1, r2, = exe.run(feed={"data1": input1}, fetch_list=[x0, x1, x2]) ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) - self.assertTrue(np.allclose(ex_x2, r2)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) + np.testing.assert_allclose(ex_x2, r2, rtol=1e-05) class API_TestSplit3(unittest.TestCase): @@ -396,8 +396,8 @@ class API_TestSplit3(unittest.TestCase): input1 = np.random.random([1, 10]).astype('float64') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) class API_TestSplit4(unittest.TestCase): @@ -417,8 +417,8 @@ class API_TestSplit4(unittest.TestCase): }, fetch_list=[x0, x1]) ex_x0, ex_x1 = np.split(input1, (3, ), axis=1) - self.assertTrue(np.allclose(ex_x0, r0)) - self.assertTrue(np.allclose(ex_x1, r1)) + np.testing.assert_allclose(ex_x0, r0, rtol=1e-05) + 
np.testing.assert_allclose(ex_x1, r1, rtol=1e-05) class API_TestDygraphSplit(unittest.TestCase): @@ -446,14 +446,16 @@ class API_TestDygraphSplit(unittest.TestCase): loss.backward() manul_grad = np.zeros_like(input_1) manul_grad[:, :2, :] = 1 - self.assertTrue(np.allclose(input.gradient(), manul_grad)) - self.assertTrue(np.allclose(ex_x0, eager_x0_out)) - self.assertTrue(np.allclose(ex_x1, eager_x1_out)) - self.assertTrue(np.allclose(ex_x2, eager_x2_out)) + np.testing.assert_allclose(input.gradient(), + manul_grad, + rtol=1e-05) + np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out2(self): with fluid.dygraph.guard(): @@ -465,9 +467,9 @@ class API_TestDygraphSplit(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out_tensor_input(self): with fluid.dygraph.guard(): @@ -482,9 +484,9 @@ class API_TestDygraphSplit(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_axis_tensor_input(self): with fluid.dygraph.guard(): @@ -499,9 +501,9 @@ class API_TestDygraphSplit(unittest.TestCase): x1_out = x1.numpy() x2_out = x2.numpy() ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def func_negative_one_section(self): with fluid.dygraph.guard(): @@ -534,9 +536,9 @@ class API_TestEmptySplit(unittest.TestCase): 5, 5, ]) - self.assertTrue(np.allclose(ex_x0, x0_out)) - self.assertTrue(np.allclose(ex_x1, x1_out)) - self.assertTrue(np.allclose(ex_x2, x2_out)) + np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05) + np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05) + np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py index c7a0724d372..d64cfcaa8d2 100755 --- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -155,7 +155,7 @@ class API_TestSqueeze(unittest.TestCase): result, = exe.run(feed={"data1": input1}, fetch_list=[result_squeeze]) expected_result = np.squeeze(input1, axis=1) - 
self.assertTrue(np.allclose(expected_result, result)) + np.testing.assert_allclose(expected_result, result, rtol=1e-05) class API_TestStaticSqueeze_(API_TestSqueeze): @@ -179,7 +179,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): output = self.squeeze(input, axis=[1]) out_np = output.numpy() expected_out = np.squeeze(input_1, axis=1) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_out_int8(self): paddle.disable_static() @@ -188,7 +188,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): output = self.squeeze(input, axis=[1]) out_np = output.numpy() expected_out = np.squeeze(input_1, axis=1) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_out_uint8(self): paddle.disable_static() @@ -197,7 +197,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): output = self.squeeze(input, axis=[1]) out_np = output.numpy() expected_out = np.squeeze(input_1, axis=1) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_axis_not_list(self): paddle.disable_static() @@ -206,7 +206,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): output = self.squeeze(input, axis=1) out_np = output.numpy() expected_out = np.squeeze(input_1, axis=1) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) def test_dimension_not_1(self): paddle.disable_static() @@ -215,7 +215,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): output = self.squeeze(input, axis=(1, 0)) out_np = output.numpy() expected_out = np.squeeze(input_1, axis=1) - self.assertTrue(np.allclose(expected_out, out_np)) + np.testing.assert_allclose(expected_out, out_np, rtol=1e-05) class API_TestDygraphSqueezeInplace(API_TestDygraphSqueeze): diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py index 5bd9e2634a0..f7b1254c880 100644 --- a/python/paddle/fluid/tests/unittests/test_stack_op.py +++ b/python/paddle/fluid/tests/unittests/test_stack_op.py @@ -232,7 +232,7 @@ class API_test(unittest.TestCase): }, fetch_list=[result_stack]) expected_result = np.stack([input1, input2, input3], axis=0) - self.assertTrue(np.allclose(expected_result, result)) + np.testing.assert_allclose(expected_result, result, rtol=1e-05) def test_single_tensor_error(self): with fluid.program_guard(fluid.Program(), fluid.Program()): @@ -253,14 +253,14 @@ class API_DygraphTest(unittest.TestCase): result = paddle.stack([x1, x2, x3]) result_np = result.numpy() expected_result = np.stack([data1, data2, data3]) - self.assertTrue(np.allclose(expected_result, result_np)) + np.testing.assert_allclose(expected_result, result_np, rtol=1e-05) with fluid.dygraph.guard(): y1 = fluid.dygraph.to_variable(data1) result = paddle.stack([y1], axis=0) result_np_2 = result.numpy() expected_result_2 = np.stack([data1], axis=0) - self.assertTrue(np.allclose(expected_result_2, result_np_2)) + np.testing.assert_allclose(expected_result_2, result_np_2, rtol=1e-05) def test_single_tensor_error(self): with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_std_layer.py b/python/paddle/fluid/tests/unittests/test_std_layer.py index 4252899eba6..a724e320c95 100644 --- a/python/paddle/fluid/tests/unittests/test_std_layer.py +++ b/python/paddle/fluid/tests/unittests/test_std_layer.py @@ -63,7 +63,7 @@ class TestStdAPI(unittest.TestCase): 
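
For the int8/uint8 cases in the squeeze tests above, the relative criterion `rtol * |desired|` allows less than one unit of slack at these magnitudes, so `assert_allclose` with `rtol=1e-05` effectively asserts exact equality on integer outputs; `np.testing.assert_array_equal` would state the same intent more directly. A quick check:

import numpy as np

x = np.arange(10, dtype='uint8')
# rtol * 9 is about 9e-05, far below one integer unit, so any
# off-by-one element would fail this assertion.
np.testing.assert_allclose(np.squeeze(x.reshape(1, 10), axis=0), x,
                           rtol=1e-05)
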
out_dygraph = self.dygraph() out_static = self.static() for out in [out_dygraph, out_static]: - self.assertTrue(np.allclose(out_ref, out)) + np.testing.assert_allclose(out_ref, out, rtol=1e-05) self.assertTrue(np.equal(out_ref.shape, out.shape).all()) @@ -111,8 +111,8 @@ class TestStdAPI_alias(unittest.TestCase): out1 = paddle.std(x).numpy() out2 = paddle.tensor.std(x).numpy() out3 = paddle.tensor.stat.std(x).numpy() - self.assertTrue(np.allclose(out1, out2)) - self.assertTrue(np.allclose(out1, out3)) + np.testing.assert_allclose(out1, out2, rtol=1e-05) + np.testing.assert_allclose(out1, out3, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_subtract_op.py b/python/paddle/fluid/tests/unittests/test_subtract_op.py index d7d9d3c8e25..dbf838bd232 100644 --- a/python/paddle/fluid/tests/unittests/test_subtract_op.py +++ b/python/paddle/fluid/tests/unittests/test_subtract_op.py @@ -53,7 +53,7 @@ class ApiSubtractTest(unittest.TestCase): "y": self.input_y }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -66,7 +66,7 @@ class ApiSubtractTest(unittest.TestCase): "z": self.input_z }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -79,7 +79,7 @@ class ApiSubtractTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -92,7 +92,7 @@ class ApiSubtractTest(unittest.TestCase): "c": self.input_c }, fetch_list=[result_max]) - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) def test_dynamic_api(self): paddle.disable_static() @@ -106,17 +106,17 @@ class ApiSubtractTest(unittest.TestCase): res = paddle.subtract(x, y) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected1)) + np.testing.assert_allclose(res, self.np_expected1, rtol=1e-05) # test broadcast res = paddle.subtract(x, z) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected2)) + np.testing.assert_allclose(res, self.np_expected2, rtol=1e-05) res = paddle.subtract(a, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected3)) + np.testing.assert_allclose(res, self.np_expected3, rtol=1e-05) res = paddle.subtract(b, c) res = res.numpy() - self.assertTrue(np.allclose(res, self.np_expected4)) + np.testing.assert_allclose(res, self.np_expected4, rtol=1e-05) diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 2d2bc8487ca..a431f79af61 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -398,9 +398,9 @@ class API_Test_Add_n(unittest.TestCase): expected_dx = np.array([[1, 1, 1], [1, 1, 1]]) expected_dy = np.array([[1, 1, 1], [1, 1, 1]]) - self.assertTrue(np.allclose(out, expected_out)) - self.assertTrue(np.allclose(dx, expected_dx)) - self.assertTrue(np.allclose(dy, expected_dy)) + np.testing.assert_allclose(out, expected_out, rtol=1e-05) + np.testing.assert_allclose(dx, expected_dx, 
rtol=1e-05) + np.testing.assert_allclose(dy, expected_dy, rtol=1e-05) class TestRaiseSumError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_svd_op.py b/python/paddle/fluid/tests/unittests/test_svd_op.py index b3cd48b05c0..2594bea76dd 100644 --- a/python/paddle/fluid/tests/unittests/test_svd_op.py +++ b/python/paddle/fluid/tests/unittests/test_svd_op.py @@ -285,7 +285,7 @@ class TestSvdAPI(unittest.TestCase): x = paddle.to_tensor(a) u, s, vh = paddle.linalg.svd(x) gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False) - self.assertTrue(np.allclose(s, gt_s)) + np.testing.assert_allclose(s, gt_s, rtol=1e-05) def test_static(self): paddle.enable_static() @@ -304,7 +304,7 @@ class TestSvdAPI(unittest.TestCase): fetches = exe.run(fluid.default_main_program(), feed={"input": a}, fetch_list=[s]) - self.assertTrue(np.allclose(fetches[0], gt_s)) + np.testing.assert_allclose(fetches[0], gt_s, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_switch_case.py b/python/paddle/fluid/tests/unittests/test_switch_case.py index 814e46fb341..5b6d13e7bb8 100644 --- a/python/paddle/fluid/tests/unittests/test_switch_case.py +++ b/python/paddle/fluid/tests/unittests/test_switch_case.py @@ -77,16 +77,31 @@ class TestAPISwitchCase(unittest.TestCase): res = exe.run(main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4]) - self.assertTrue(np.allclose(res[0], 1), - "result is {} but answer is {}".format(res[0], 1)) - self.assertTrue(np.allclose(res[1], 2), - "result is {} but answer is {}".format(res[0], 2)) - self.assertTrue(np.allclose(res[2], 3), - "result is {} but answer is {}".format(res[0], 3)) - self.assertTrue(np.allclose(res[3], 2), - "result is {} but answer is {}".format(res[0], 2)) - self.assertTrue(np.allclose(res[4], 2), - "result is {} but answer is {}".format(res[0], 2)) + np.testing.assert_allclose( + res[0], + 1, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[0], 1)) + np.testing.assert_allclose( + res[1], + 2, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[0], 2)) + np.testing.assert_allclose( + res[2], + 3, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[0], 3)) + np.testing.assert_allclose( + res[3], + 2, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[0], 2)) + np.testing.assert_allclose( + res[4], + 2, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[0], 2)) def test_return_var_tuple(self): @@ -123,10 +138,12 @@ class TestAPISwitchCase(unittest.TestCase): exe = fluid.Executor(place) ret = exe.run(main_program, fetch_list=out) - self.assertTrue( - np.allclose(np.asarray(ret[0]), np.full((1, 2), 1, np.int32))) - self.assertTrue( - np.allclose(np.asarray(ret[1]), np.full((2, 3), 2, np.float32))) + np.testing.assert_allclose(np.asarray(ret[0]), + np.full((1, 2), 1, np.int32), + rtol=1e-05) + np.testing.assert_allclose(np.asarray(ret[1]), + np.full((2, 3), 2, np.float32), + rtol=1e-05) class TestAPISwitchCase_Nested(unittest.TestCase): @@ -213,12 +230,21 @@ class TestAPISwitchCase_Nested(unittest.TestCase): feed={"index_1": np.array([1], dtype="uint8")}, fetch_list=[out_1, out_2, out_3]) - self.assertTrue(np.allclose(res[0], 1), - "result is {} but answer is {}".format(res[0], 1)) - self.assertTrue(np.allclose(res[1], 2), - "result is {} but answer is {}".format(res[1], 2)) - self.assertTrue(np.allclose(res[2], 3), - "result is {} but answer is {}".format(res[2], 3)) + np.testing.assert_allclose( + res[0], + 1, + rtol=1e-05, + 
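
The switch_case conversions above lean on two conveniences: `assert_allclose` broadcasts a scalar `desired` value against the fetched array, and `err_msg` still takes the same `.format()`-built string as before (the first hunk's messages reuse `res[0]` in every branch, a quirk the original code already had and the autofix preserves verbatim). A minimal sketch:

import numpy as np

res0 = np.array([1.0])  # stand-in for a fetched scalar result
# desired can be a plain Python scalar; it is broadcast against res0.
np.testing.assert_allclose(
    res0, 1, rtol=1e-05,
    err_msg='result is {} but answer is {}'.format(res0, 1))
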
err_msg='result is {} but answer is {}'.format(res[0], 1)) + np.testing.assert_allclose( + res[1], + 2, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[1], 2)) + np.testing.assert_allclose( + res[2], + 3, + rtol=1e-05, + err_msg='result is {} but answer is {}'.format(res[2], 3)) # test TypeError and ValueError of api switch_case diff --git a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py index 06da617f26f..f55d60d8564 100644 --- a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py @@ -168,10 +168,14 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): sync_bn_val = sync_bn_fetches[i] if sync_bn_val.shape != bn_val.shape: sync_bn_val = sync_bn_val[:bn_val.shape[0]] - self.assertTrue( - np.allclose(bn_val, sync_bn_val, atol=self.atol), - "Output (" + fetch_names[i] + ") has diff. \n" + "\nBN " + - str(bn_val) + "\n" + "Sync BN " + str(sync_bn_val)) + np.testing.assert_allclose(bn_val, + sync_bn_val, + rtol=1e-05, + atol=self.atol, + err_msg='Output (' + fetch_names[i] + + ') has diff. \n' + '\nBN ' + + str(bn_val) + '\n' + 'Sync BN ' + + str(sync_bn_val)) def test_train(self): """Test training.""" @@ -328,10 +332,12 @@ class TestConvertSyncBatchNormCase2(unittest.TestCase): x = paddle.to_tensor(data) bn_out = bn_model(x) sybn_out = sybn_model(x) - self.assertTrue( - np.allclose(bn_out.numpy(), sybn_out.numpy()), - "Output has diff. \n" + "\nBN " + str(bn_out.numpy()) + - "\n" + "Sync BN " + str(sybn_out.numpy())) + np.testing.assert_allclose( + bn_out.numpy(), + sybn_out.numpy(), + rtol=1e-05, + err_msg='Output has diff. \n' + '\nBN ' + + str(bn_out.numpy()) + '\n' + 'Sync BN ' + str(sybn_out.numpy())) class TestDygraphSyncBatchNormDataFormatError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py index 34ca5860a16..bdc3f7f439f 100644 --- a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py @@ -98,7 +98,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase): out_ref = np.array( np.take_along_axis(self.x_np, self.index_np, self.axis)) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out, out_ref, rtol=0.001) def test_api_dygraph(self): paddle.disable_static(self.place[0]) @@ -107,7 +107,7 @@ class TestTakeAlongAxisAPI(unittest.TestCase): out = paddle.take_along_axis(x_tensor, self.index, self.axis) out_ref = np.array( np.take_along_axis(self.x_np, self.index_np, self.axis)) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-03), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=0.001) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_tensor_uva.py b/python/paddle/fluid/tests/unittests/test_tensor_uva.py index a2f0bfa6515..8133ce50567 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_uva.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_uva.py @@ -54,8 +54,8 @@ class TestUVATensorFromNumpy(unittest.TestCase): self.assertTrue(tensor.place.is_gpu_place()) self.assertTrue(tensor2.place.is_gpu_place()) - self.assertTrue(np.allclose(tensor.numpy(), data)) - self.assertTrue(np.allclose(tensor2.numpy(), data)) + np.testing.assert_allclose(tensor.numpy(), data, rtol=1e-05) + 
np.testing.assert_allclose(tensor2.numpy(), data, rtol=1e-05) def test_uva_tensor_creation(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_tf32_cublas.py b/python/paddle/fluid/tests/unittests/test_tf32_cublas.py index ce08c8db89e..f59b7e06c7d 100644 --- a/python/paddle/fluid/tests/unittests/test_tf32_cublas.py +++ b/python/paddle/fluid/tests/unittests/test_tf32_cublas.py @@ -49,7 +49,7 @@ class TestTF32OnMatmul(unittest.TestCase): data2 = paddle.to_tensor(input_array2) out = paddle.matmul(data1, data2) expected_result = np.matmul(input_array1, input_array2) - self.assertTrue(np.allclose(expected_result, out.numpy(), 1e-03)) + np.testing.assert_allclose(expected_result, out.numpy(), rtol=0.001) core.set_cublas_switch(True) # restore the switch else: pass diff --git a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py index 4e2aecaca13..f8a541380a8 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_v2_op.py @@ -184,25 +184,31 @@ class TestTopKAPI(unittest.TestCase): # test case for basic test case 1 paddle_result = paddle.topk(input_tensor, k=2) numpy_result = numpy_topk(self.input_data, k=2) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 2 with axis paddle_result = paddle.topk(input_tensor, k=2, axis=1) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 3 with tensor K k_tensor = paddle.to_tensor(np.array([2])) paddle_result = paddle.topk(input_tensor, k=k_tensor, axis=1) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 4 with tensor largest k_tensor = paddle.to_tensor(np.array([2])) paddle_result = paddle.topk(input_tensor, @@ -213,10 +219,12 @@ class TestTopKAPI(unittest.TestCase): k=2, axis=1, largest=False) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 5 with axis -1 k_tensor = paddle.to_tensor(np.array([2])) paddle_result = paddle.topk(input_tensor, @@ -227,24 +235,30 @@ class TestTopKAPI(unittest.TestCase): k=2, axis=-1, largest=False) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), 
numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 6 for the partial sort paddle_result = paddle.topk(large_input_tensor, k=1, axis=-1) numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) - self.assertTrue( - np.allclose(paddle_result[0].numpy(), numpy_result[0])) - self.assertTrue( - np.allclose(paddle_result[1].numpy(), numpy_result[1])) + np.testing.assert_allclose(paddle_result[0].numpy(), + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1].numpy(), + numpy_result[1], + rtol=1e-05) # test case for basic test case 7 for the unsorted paddle_result = paddle.topk(input_tensor, k=2, axis=1, sorted=False) sort_paddle = numpy_topk(np.array(paddle_result[0].numpy()), axis=1, k=2) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0])) + np.testing.assert_allclose(sort_paddle[0], + numpy_result[0], + rtol=1e-05) def run_static(self, place): paddle.enable_static() @@ -282,32 +296,58 @@ class TestTopKAPI(unittest.TestCase): result7[0], result7[1] ]) numpy_result = numpy_topk(self.input_data, k=2) - self.assertTrue(np.allclose(paddle_result[0], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[1], numpy_result[1])) + np.testing.assert_allclose(paddle_result[0], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[1], + numpy_result[1], + rtol=1e-05) numpy_result = numpy_topk(self.input_data, k=2, axis=-1) - self.assertTrue(np.allclose(paddle_result[2], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[3], numpy_result[1])) + np.testing.assert_allclose(paddle_result[2], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[3], + numpy_result[1], + rtol=1e-05) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(paddle_result[4], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[5], numpy_result[1])) + np.testing.assert_allclose(paddle_result[4], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[5], + numpy_result[1], + rtol=1e-05) numpy_result = numpy_topk(self.input_data, k=2, axis=1, largest=False) - self.assertTrue(np.allclose(paddle_result[6], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[7], numpy_result[1])) + np.testing.assert_allclose(paddle_result[6], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[7], + numpy_result[1], + rtol=1e-05) numpy_result = numpy_topk(self.input_data, k=2, axis=-1, largest=False) - self.assertTrue(np.allclose(paddle_result[8], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[9], numpy_result[1])) + np.testing.assert_allclose(paddle_result[8], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[9], + numpy_result[1], + rtol=1e-05) numpy_result = numpy_topk(self.large_input_data, k=1, axis=-1) - self.assertTrue(np.allclose(paddle_result[10], numpy_result[0])) - self.assertTrue(np.allclose(paddle_result[11], numpy_result[1])) + np.testing.assert_allclose(paddle_result[10], + numpy_result[0], + rtol=1e-05) + np.testing.assert_allclose(paddle_result[11], + numpy_result[1], + rtol=1e-05) sort_paddle = numpy_topk(paddle_result[12], axis=1, k=2) numpy_result = numpy_topk(self.input_data, k=2, axis=1) - self.assertTrue(np.allclose(sort_paddle[0], numpy_result[0])) + 
np.testing.assert_allclose(sort_paddle[0], + numpy_result[0], + rtol=1e-05) def test_cases(self): places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_trace_op.py b/python/paddle/fluid/tests/unittests/test_trace_op.py index 7aefd11790b..b17bb9c39bf 100644 --- a/python/paddle/fluid/tests/unittests/test_trace_op.py +++ b/python/paddle/fluid/tests/unittests/test_trace_op.py @@ -85,8 +85,8 @@ class TestTraceAPICase(unittest.TestCase): return_numpy=True) target1 = np.trace(case) target2 = np.trace(case, offset=-5, axis1=1, axis2=-1) - self.assertTrue(np.allclose(results[0], target1)) - self.assertTrue(np.allclose(results[1], target2)) + np.testing.assert_allclose(results[0], target1, rtol=1e-05) + np.testing.assert_allclose(results[1], target2, rtol=1e-05) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py index 32363e29f1a..2dc9c28b083 100644 --- a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py @@ -272,7 +272,7 @@ class TestTriangularSolveAPI(unittest.TestCase): "y": y_np }, fetch_list=[z]) - self.assertTrue(np.allclose(fetches[0], z_np)) + np.testing.assert_allclose(fetches[0], z_np, rtol=1e-05) def test_static(self): for place in self.place: @@ -290,7 +290,7 @@ class TestTriangularSolveAPI(unittest.TestCase): y = paddle.to_tensor(y_np) z = paddle.linalg.triangular_solve(x, y, upper=False) - self.assertTrue(np.allclose(z_np, z.numpy())) + np.testing.assert_allclose(z_np, z.numpy(), rtol=1e-05) self.assertEqual(z_np.shape, z.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py index 3ed9e517098..40f8bc3593a 100644 --- a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py +++ b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py @@ -150,8 +150,8 @@ class TestTrilTriuOpAPI(unittest.TestCase): feed={"x": data}, fetch_list=[tril_out, triu_out], ) - self.assertTrue(np.allclose(tril_out, np.tril(data))) - self.assertTrue(np.allclose(triu_out, np.triu(data))) + np.testing.assert_allclose(tril_out, np.tril(data), rtol=1e-05) + np.testing.assert_allclose(triu_out, np.triu(data), rtol=1e-05) def test_api_with_dygraph(self): paddle.disable_static() @@ -163,8 +163,8 @@ class TestTrilTriuOpAPI(unittest.TestCase): x = fluid.dygraph.to_variable(data) tril_out, triu_out = tensor.tril(x).numpy(), tensor.triu( x).numpy() - self.assertTrue(np.allclose(tril_out, np.tril(data))) - self.assertTrue(np.allclose(triu_out, np.triu(data))) + np.testing.assert_allclose(tril_out, np.tril(data), rtol=1e-05) + np.testing.assert_allclose(triu_out, np.triu(data), rtol=1e-05) def test_fluid_api(self): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py index 717f1b60049..53ae94c331f 100755 --- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_op.py @@ -677,10 +677,11 @@ class TestTrilinearInterpAPI(unittest.TestCase): out_h=18, out_w=8, align_mode=1) - self.assertTrue( - np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 4, 1)))) + np.testing.assert_allclose(results[0], + np.transpose(expect_res, (0, 2, 3, 4, 1)), + rtol=1e-05) for i in range(len(results) - 1): - 
self.assertTrue(np.allclose(results[i + 1], expect_res)) + np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestTrilinearInterpOpException(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py index f494767d8d0..4c799a826b2 100755 --- a/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_trilinear_interp_v2_op.py @@ -736,10 +736,11 @@ class TestTrilinearInterpAPI(unittest.TestCase): out_h=18, out_w=8, align_mode=1) - self.assertTrue( - np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 4, 1)))) + np.testing.assert_allclose(results[0], + np.transpose(expect_res, (0, 2, 3, 4, 1)), + rtol=1e-05) for i in range(len(results) - 1): - self.assertTrue(np.allclose(results[i + 1], expect_res)) + np.testing.assert_allclose(results[i + 1], expect_res, rtol=1e-05) class TestTrilinearInterpOpException(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py b/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py index 745cb6a1780..bf6faeca556 100644 --- a/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py +++ b/python/paddle/fluid/tests/unittests/test_triplet_margin_loss.py @@ -109,7 +109,7 @@ def test_static(place, reduction=reduction) exe = paddle.static.Executor(place) - static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) + static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])[0] return static_result @@ -212,9 +212,18 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, expected)) - self.assertTrue(np.allclose(static_result, dy_result)) - self.assertTrue(np.allclose(dy_result, expected)) + np.testing.assert_allclose(static_result, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_result, + expected, + rtol=1e-5, + atol=1e-8) static_functional = test_static(place=place, input_np=input, positive_np=positive, @@ -227,9 +236,18 @@ class TestTripletMarginLoss(unittest.TestCase): negative=negative, reduction=reduction, functional=True) - self.assertTrue(np.allclose(static_functional, expected)) - self.assertTrue(np.allclose(static_functional, dy_functional)) - self.assertTrue(np.allclose(dy_functional, expected)) + np.testing.assert_allclose(static_functional, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_functional, + expected, + rtol=1e-5, + atol=1e-8) def test_TripletMarginLoss_error(self): paddle.disable_static() @@ -300,9 +318,15 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, expected)) - self.assertTrue(np.allclose(static_result, dy_result)) - self.assertTrue(np.allclose(dy_result, expected)) + np.testing.assert_allclose(static_result, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) static_functional = test_static(place=place, swap=True, input_np=input, @@ -317,9 +341,18 @@ class TestTripletMarginLoss(unittest.TestCase): negative=negative, reduction=reduction, functional=True) 
- self.assertTrue(np.allclose(static_functional, expected)) - self.assertTrue(np.allclose(static_functional, dy_functional)) - self.assertTrue(np.allclose(dy_functional, expected)) + np.testing.assert_allclose(static_functional, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_functional, + expected, + rtol=1e-5, + atol=1e-8) def test_TripletMarginLoss_margin(self): paddle.disable_static() @@ -369,9 +402,15 @@ class TestTripletMarginLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, expected)) - self.assertTrue(np.allclose(static_result, dy_result)) - self.assertTrue(np.allclose(dy_result, expected)) + np.testing.assert_allclose(static_result, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) static_functional = test_static(place=place, p=p, input_np=input, @@ -386,9 +425,18 @@ class TestTripletMarginLoss(unittest.TestCase): negative=negative, reduction=reduction, functional=True) - self.assertTrue(np.allclose(static_functional, expected)) - self.assertTrue(np.allclose(static_functional, dy_functional)) - self.assertTrue(np.allclose(dy_functional, expected)) + np.testing.assert_allclose(static_functional, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_functional, + expected, + rtol=1e-5, + atol=1e-8) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py b/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py index 0fb8ae22c26..8b7f79b77ab 100644 --- a/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py +++ b/python/paddle/fluid/tests/unittests/test_triplet_margin_with_distance_loss.py @@ -106,7 +106,7 @@ def test_static(place, reduction=reduction) exe = paddle.static.Executor(place) - static_result = exe.run(prog, feed=feed_dict, fetch_list=[res]) + static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])[0] return static_result @@ -211,9 +211,18 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, expected)) - self.assertTrue(np.allclose(static_result, dy_result)) - self.assertTrue(np.allclose(dy_result, expected)) + np.testing.assert_allclose(static_result, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_result, + expected, + rtol=1e-5, + atol=1e-8) static_functional = test_static(place=place, input_np=input, positive_np=positive, @@ -226,9 +235,18 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative=negative, reduction=reduction, functional=True) - self.assertTrue(np.allclose(static_functional, expected)) - self.assertTrue(np.allclose(static_functional, dy_functional)) - self.assertTrue(np.allclose(dy_functional, expected)) + np.testing.assert_allclose(static_functional, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_functional, + expected, + rtol=1e-5, + atol=1e-8) def 
test_TripletMarginDistanceLoss_error(self): paddle.disable_static() @@ -281,7 +299,10 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): distance_function=distance_function, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, dy_result)) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) static_functional = test_static(place=place, input_np=input, positive_np=positive, @@ -296,7 +317,10 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): distance_function=distance_function, reduction=reduction, functional=True) - self.assertTrue(np.allclose(static_functional, dy_functional)) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) def test_TripletMarginWithDistanceLoss_distance_funtion_error(self): paddle.disable_static() @@ -374,9 +398,15 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative_np=negative, reduction=reduction, ) - self.assertTrue(np.allclose(static_result, expected)) - self.assertTrue(np.allclose(static_result, dy_result)) - self.assertTrue(np.allclose(dy_result, expected)) + np.testing.assert_allclose(static_result, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_result, + dy_result, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_result, expected, rtol=1e-5, atol=1e-8) static_functional = test_static(place=place, swap=True, input_np=input, @@ -391,9 +421,18 @@ class TestTripletMarginWithDistanceLoss(unittest.TestCase): negative=negative, reduction=reduction, functional=True) - self.assertTrue(np.allclose(static_functional, expected)) - self.assertTrue(np.allclose(static_functional, dy_functional)) - self.assertTrue(np.allclose(dy_functional, expected)) + np.testing.assert_allclose(static_functional, + expected, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(static_functional, + dy_functional, + rtol=1e-5, + atol=1e-8) + np.testing.assert_allclose(dy_functional, + expected, + rtol=1e-5, + atol=1e-8) def test_TripletMarginWithDistanceLoss_margin(self): paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_trunc_op.py b/python/paddle/fluid/tests/unittests/test_trunc_op.py index 56a39e5f692..207c55baf01 100644 --- a/python/paddle/fluid/tests/unittests/test_trunc_op.py +++ b/python/paddle/fluid/tests/unittests/test_trunc_op.py @@ -82,14 +82,14 @@ class TestTruncAPI(unittest.TestCase): res = exe.run(feed={'X': self.x}, fetch_list=[out]) out_ref = np.trunc(self.x) for out in res: - self.assertEqual(np.allclose(out, out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out, out_ref, rtol=1e-08) def test_api_dygraph(self): paddle.disable_static(self.place) x_tensor = paddle.to_tensor(self.x) out = paddle.trunc(x_tensor) out_ref = np.trunc(self.x) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08) paddle.enable_static() def test_api_eager(self): @@ -99,7 +99,7 @@ class TestTruncAPI(unittest.TestCase): x_tensor = paddle.to_tensor(self.x) out = paddle.trunc(x_tensor) out_ref = np.trunc(self.x) - self.assertEqual(np.allclose(out.numpy(), out_ref, rtol=1e-08), True) + np.testing.assert_allclose(out.numpy(), out_ref, rtol=1e-08) paddle.enable_static() def test_api_eager_dygraph(self): diff --git a/python/paddle/fluid/tests/unittests/test_unfold_op.py b/python/paddle/fluid/tests/unittests/test_unfold_op.py index c990b67f9a4..7db543e840a 100644 --- a/python/paddle/fluid/tests/unittests/test_unfold_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_unfold_op.py @@ -121,7 +121,9 @@ class TestUnfoldAPI(TestUnfoldOp): m = paddle.nn.Unfold(**self.attrs) m.eval() result = m(input) - self.assertTrue(np.allclose(result.numpy(), self.outputs['Y'])) + np.testing.assert_allclose(result.numpy(), + self.outputs['Y'], + rtol=1e-05) def test_info(self): str(paddle.nn.Unfold(**self.attrs)) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py index 27dda75a736..464374782a9 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py @@ -49,8 +49,7 @@ class TestUniformRandomOpBF16(OpTest): result = np.array(outs[0]) hist, prob = self.output_hist(result) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) def test_check_output(self): outs = self.calc_output(core.CPUPlace()) @@ -130,8 +129,7 @@ class TestUniformRandomOpBF16SelectedRows(unittest.TestCase): self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) hist, prob = output_hist(result) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpBF16SelectedRowsWithDiagInit( @@ -155,8 +153,7 @@ class TestUniformRandomOpBF16SelectedRowsWithDiagInit( self.assertEqual(out.get_tensor().shape(), [500, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) hist, prob = output_hist(result) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase): @@ -227,8 +224,7 @@ class TestUniformRandomOpBF16SelectedRowsShapeTensor(unittest.TestCase): self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) hist, prob = output_hist(result) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpBF16SelectedRowsShapeTensorList( @@ -254,8 +250,7 @@ class TestUniformRandomOpBF16SelectedRowsShapeTensorList( self.assertEqual(out.get_tensor().shape(), [1000, 784]) result = convert_uint16_to_float(np.array(out.get_tensor())) hist, prob = output_hist(result) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py index d677d3b34c6..2b3881535c9 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_inplace_op.py @@ -131,7 +131,7 @@ class TestUniformRandomInplaceOpDistribution(unittest.TestCase): hist, _ = np.histogram(tensor.numpy()[0], bins=self.bins) prob = hist / float(self.shape[0]) prob_expect = np.ones((self.bins, )) / float(self.bins) - self.assertTrue(np.allclose(prob, prob_expect, rtol=0, atol=1e-2)) + np.testing.assert_allclose(prob, prob_expect, rtol=0, atol=0.01) class 
TestUniformRandomInplaceOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index d80fe3b2d47..a368697c934 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -73,8 +73,7 @@ class TestUniformRandomOp_attr_tensorlist(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestMaxMinAreInt(TestUniformRandomOp_attr_tensorlist): @@ -107,8 +106,7 @@ class TestUniformRandomOp_attr_tensorlist_int32(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOp_attr_tensor(OpTest): @@ -129,8 +127,7 @@ class TestUniformRandomOp_attr_tensor(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOp_attr_tensor_int32(OpTest): @@ -151,8 +148,7 @@ class TestUniformRandomOp_attr_tensor_int32(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOp(OpTest): @@ -178,8 +174,7 @@ class TestUniformRandomOp(OpTest): def verify_output(self, outs): hist, prob = self.output_hist(np.array(outs[0])) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) def test_check_api(self): places = self._get_places() @@ -269,8 +264,7 @@ class TestUniformRandomOpSelectedRows(unittest.TestCase): op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpSelectedRowsWithDiagInit( @@ -292,8 +286,7 @@ class TestUniformRandomOpSelectedRowsWithDiagInit( op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [500, 784]) hist, prob = output_hist_diag(np.array(out.get_tensor())) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpApi(unittest.TestCase): @@ -429,8 +422,7 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase): op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) - self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase): @@ -462,8 +454,7 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase): op.run(scope, place) self.assertEqual(out.get_tensor().shape(), [1000, 784]) hist, prob = output_hist(np.array(out.get_tensor())) - 
self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01), - "hist: " + str(hist)) + np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01) class TestUniformRandomDygraphMode(unittest.TestCase): @@ -625,7 +616,9 @@ class TestRandomValue(unittest.TestCase): out = paddle.rand([32, 3, 1024, 1024], dtype='float64').numpy() self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) - self.assertTrue(np.allclose(out[2, 1, 512, 1000:1010], expect)) + np.testing.assert_allclose(out[2, 1, 512, 1000:1010], + expect, + rtol=1e-05) expect_mean = 0.50002604722976684570312500 expect_std = 0.2886914908885955810546875 @@ -636,7 +629,9 @@ class TestRandomValue(unittest.TestCase): out = paddle.rand([32, 3, 1024, 1024], dtype='float32').numpy() self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) - self.assertTrue(np.allclose(out[2, 1, 512, 1000:1010], expect)) + np.testing.assert_allclose(out[2, 1, 512, 1000:1010], + expect, + rtol=1e-05) expect_mean = 25.11843109130859375 expect_std = 43.370647430419921875 @@ -648,7 +643,7 @@ class TestRandomValue(unittest.TestCase): dtype='float32').uniform_(-50, 100).numpy() self.assertEqual(np.mean(out), expect_mean) self.assertEqual(np.std(out), expect_std) - self.assertTrue(np.allclose(out[10, 10, 10, 0:10], expect)) + np.testing.assert_allclose(out[10, 10, 10, 0:10], expect, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py index b70a342ab82..789d52505ae 100644 --- a/python/paddle/fluid/tests/unittests/test_unique.py +++ b/python/paddle/fluid/tests/unittests/test_unique.py @@ -280,9 +280,9 @@ class TestUniqueAPI(unittest.TestCase): return_inverse=True, return_counts=True, axis=0) - self.assertTrue(np.allclose(result[0], np_unique)) - self.assertTrue(np.allclose(result[1], np_inverse)) - self.assertTrue(np.allclose(result[2], np_counts)) + np.testing.assert_allclose(result[0], np_unique, rtol=1e-05) + np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05) + np.testing.assert_allclose(result[2], np_counts, rtol=1e-05) class TestUniqueError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py index 30c58d3477c..10fb87c61be 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py @@ -74,8 +74,9 @@ class TestUnpool1DOpAPI_dygraph(unittest.TestCase): stride=2) expected_output_unpool = unpool1dmax_forward_naive( output.numpy(), indices.numpy(), [2], [2], [0], [16]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -100,8 +101,9 @@ class TestUnpool1DOpAPI_dygraph2(unittest.TestCase): stride=None) expected_output_unpool = unpool1dmax_forward_naive( output.numpy(), indices.numpy(), [2], [2], [0], [16]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -125,8 +127,9 @@ class TestUnpool1DOpAPI_dygraph3(unittest.TestCase): output_unpool = UnPool1d(output, indices) expected_output_unpool = unpool1dmax_forward_naive( output.numpy(), indices.numpy(), [2], [2], [0], [16]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + 
np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -167,7 +170,9 @@ class TestUnpool1DOpAPI_static(unittest.TestCase): 3]]]).astype("int32") expected_output_unpool = unpool1dmax_forward_naive( pool1d_out_np, indices_np, [2], [2], [0], [4]) - self.assertTrue(np.allclose(fetches[0], expected_output_unpool)) + np.testing.assert_allclose(fetches[0], + expected_output_unpool, + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py index ec04ca56a54..f7267a60b1e 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py @@ -235,8 +235,9 @@ class TestUnpool3DOpAPI_dygraph(unittest.TestCase): expected_output_unpool = unpool3dmax_forward_naive( output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], [0, 0, 0], [4, 4, 6]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -262,8 +263,9 @@ class TestUnpool3DOpAPI_dygraph2(unittest.TestCase): expected_output_unpool = unpool3dmax_forward_naive( output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], [0, 0, 0], [4, 4, 6]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -288,8 +290,9 @@ class TestUnpool3DOpAPI_dygraph3(unittest.TestCase): expected_output_unpool = unpool3dmax_forward_naive( output.numpy(), indices.numpy(), [2, 2, 2], [2, 2, 2], [0, 0, 0], [4, 4, 6]) - self.assertTrue( - np.allclose(output_unpool.numpy(), expected_output_unpool)) + np.testing.assert_allclose(output_unpool.numpy(), + expected_output_unpool, + rtol=1e-05) paddle.enable_static() @@ -331,7 +334,9 @@ class TestUnpool3DOpAPI_static(unittest.TestCase): expected_output_unpool = unpool3dmax_forward_naive( pool3d_out_np, indices_np, [2, 2, 2], [2, 2, 2], [0, 0, 0], [2, 4, 4]) - self.assertTrue(np.allclose(fetches[0], expected_output_unpool)) + np.testing.assert_allclose(fetches[0], + expected_output_unpool, + rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py index 07e44ce65d6..8c6d29de545 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py @@ -244,7 +244,7 @@ class TestUnpoolOpAPI_dy(unittest.TestCase): indices_np = indices.numpy() expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ [2,2], [0,0], [5,5]).astype("float64") - self.assertTrue(np.allclose(out_pp.numpy(), expect_res)) + np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_dy2(unittest.TestCase): @@ -278,7 +278,7 @@ class TestUnpoolOpAPI_dy2(unittest.TestCase): indices_np = indices.numpy() expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ [2,2], [0,0], [5,5]).astype("float64") - self.assertTrue(np.allclose(out_pp.numpy(), expect_res)) + np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_dy3(unittest.TestCase): @@ -310,7 +310,7 @@ class TestUnpoolOpAPI_dy3(unittest.TestCase): indices_np = indices.numpy() expect_res =unpool2dmax_forward_naive(output_np, indices_np, [2,2], \ [2,2], [0,0], 
[4,4]).astype("float64") - self.assertTrue(np.allclose(out_pp.numpy(), expect_res)) + np.testing.assert_allclose(out_pp.numpy(), expect_res, rtol=1e-05) class TestUnpoolOpAPI_st(unittest.TestCase): @@ -351,7 +351,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase): indices_np = np.array([[[[5, 7], [13, 15]]]]).astype("int32") expect_res =unpool2dmax_forward_naive(pool_out_np, indices_np, [2,2], \ [2,2], [0,0], [5,5]).astype("float64") - self.assertTrue(np.allclose(results[0], expect_res)) + np.testing.assert_allclose(results[0], expect_res, rtol=1e-05) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index fa0234227d4..04cf0f1c010 100755 --- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -127,7 +127,7 @@ class API_TestUnsqueeze(unittest.TestCase): input = np.squeeze(input1, axis=1) result, = exe.run(feed={"data1": input}, fetch_list=[result_squeeze]) - self.assertTrue(np.allclose(input1, result)) + np.testing.assert_allclose(input1, result, rtol=1e-05) class TestUnsqueezeOpError(unittest.TestCase): @@ -165,7 +165,7 @@ class API_TestUnsqueeze2(unittest.TestCase): "data2": input2 }, fetch_list=[result_squeeze]) - self.assertTrue(np.allclose(input1, result1)) + np.testing.assert_allclose(input1, result1, rtol=1e-05) class API_TestUnsqueeze3(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index e670781bee7..2d5778b7b20 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -1623,11 +1623,11 @@ class TestVarBaseTo(unittest.TestCase): def func_test_to_api(self): x_double = self.x._to(dtype='double') self.assertEqual(x_double.dtype, paddle.fluid.core.VarDesc.VarType.FP64) - self.assertTrue(np.allclose(self.np_x, x_double)) + np.testing.assert_allclose(self.np_x, x_double, rtol=1e-05) x_ = self.x._to() self.assertEqual(self.x.dtype, paddle.fluid.core.VarDesc.VarType.FP64) - self.assertTrue(np.allclose(self.np_x, x_)) + np.testing.assert_allclose(self.np_x, x_, rtol=1e-05) if paddle.fluid.is_compiled_with_cuda(): x_gpu = self.x._to(device=paddle.CUDAPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py index cf46d82b11d..ca0ad7e9fd9 100644 --- a/python/paddle/fluid/tests/unittests/test_variance_layer.py +++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py @@ -63,7 +63,7 @@ class TestVarAPI(unittest.TestCase): out_dygraph = self.dygraph() out_static = self.static() for out in [out_dygraph, out_static]: - self.assertTrue(np.allclose(out_ref, out)) + np.testing.assert_allclose(out_ref, out, rtol=1e-05) self.assertTrue(np.equal(out_ref.shape, out.shape).all()) @@ -111,8 +111,8 @@ class TestVarAPI_alias(unittest.TestCase): out1 = paddle.var(x).numpy() out2 = paddle.tensor.var(x).numpy() out3 = paddle.tensor.stat.var(x).numpy() - self.assertTrue(np.allclose(out1, out2)) - self.assertTrue(np.allclose(out1, out3)) + np.testing.assert_allclose(out1, out2, rtol=1e-05) + np.testing.assert_allclose(out1, out3, rtol=1e-05) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index ad31bbd58a9..6a516e3addd 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -592,8 +592,11 @@ class TestCTCLossAPICase(unittest.TestCase): loss_np_mean = (loss_np / labels_length.numpy()).mean() loss_np_sum = loss_np.sum() - self.assertTrue(np.allclose(loss_pd_mean, loss_np_mean, atol=1)) - self.assertTrue(np.allclose(loss_pd_sum, loss_np_sum, atol=1)) + np.testing.assert_allclose(loss_pd_mean, + loss_np_mean, + rtol=1e-05, + atol=1) + np.testing.assert_allclose(loss_pd_sum, loss_np_sum, rtol=1e-05, atol=1) def test_class_api(self): self.batch_size = 3 @@ -633,7 +636,7 @@ class TestCTCLossAPICase(unittest.TestCase): paddle.enable_static() loss_np = np.squeeze(loss_np, axis=-1) - self.assertTrue(np.allclose(loss_pd, loss_np, atol=1)) + np.testing.assert_allclose(loss_pd, loss_np, rtol=1e-05, atol=1) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py index 95cfe40084f..553fb8cf7ad 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py +++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -import numpy +import numpy as np import collections import paddle.fluid as fluid import paddle.fluid.core as core @@ -75,13 +75,12 @@ class TestWeightNormalization(unittest.TestCase): data_lod_level = desc[2] data_lod = [] for i in range(data_lod_level): - lod_level_i = numpy.random.randint( - low=1, - high=5, - size=self.batch_size - if i == 0 else sum(lod_level_i)).tolist() + lod_level_i = np.random.randint(low=1, + high=5, + size=self.batch_size if i == 0 + else sum(lod_level_i)).tolist() data_lod.append(lod_level_i) - data_value = numpy.random.random( + data_value = np.random.random( size=[sum(data_lod[-1]) if data_lod else self.batch_size] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) @@ -96,14 +95,14 @@ class TestWeightNormalization(unittest.TestCase): self.inputs[desc[0]] = tensor def weight_normalize(self): - v = numpy.ones( + v = np.ones( (self.data[self.data_desc[0][0]][0].shape[-1], self.hidden_size)) - g = numpy.linalg.norm(v, axis=None, keepdims=True) - w = g * v / numpy.linalg.norm(v, axis=None, keepdims=True) + g = np.linalg.norm(v, axis=None, keepdims=True) + w = g * v / np.linalg.norm(v, axis=None, keepdims=True) x = self.data[self.data_desc[0][0]][0] - out = numpy.dot(x, w) - g_grad = (numpy.dot(x.T, numpy.ones_like(out)) * - (v / numpy.linalg.norm(v, axis=None, keepdims=True))).sum( + out = np.dot(x, w) + g_grad = (np.dot(x.T, np.ones_like(out)) * + (v / np.linalg.norm(v, axis=None, keepdims=True))).sum( axis=None, keepdims=True) return g, v, g_grad @@ -113,8 +112,10 @@ class TestWeightNormalization(unittest.TestCase): expect_output = self.weight_normalize() for actual_output in self.actual_outputs: [ - self.assertTrue( - numpy.allclose(numpy.array(actual), expect, atol=0.001)) + np.testing.assert_allclose(np.array(actual), + expect, + rtol=1e-05, + atol=0.001) for expect, actual in zip(expect_output, actual_output) ] diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index 83381ac9fcd..e6964861d8e 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -345,7 +345,7 @@ class TestWhereDygraphAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.array([[0, 0], [1, 1]]) - 
self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) data = np.array([True, True, False]) with program_guard(Program(), Program()): x = fluid.layers.data(name='x', shape=[(-1)]) @@ -358,7 +358,7 @@ class TestWhereDygraphAPI(unittest.TestCase): fetch_list=[z.name], return_numpy=False) expect_out = np.array([[0], [1]]) - self.assertTrue(np.allclose(expect_out, np.array(res))) + np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) def test_eager(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index d97722c0980..a6ec7f77edd 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -50,8 +50,9 @@ class TestApiWhileLoop(unittest.TestCase): 0) if core.is_compiled_with_cuda() else fluid.CPUPlace() exe = fluid.Executor(place) res = exe.run(main_program, fetch_list=out) - self.assertTrue( - np.allclose(np.asarray(res[0]), np.full((1), 10, np.int64))) + np.testing.assert_allclose(np.asarray(res[0]), + np.full(1, 10, np.int64), + rtol=1e-05) def test_var_list(self): @@ -81,7 +82,7 @@ class TestApiWhileLoop(unittest.TestCase): res = exe.run(main_program, feed={'mem': data}, fetch_list=out) for i in range(10): data = np.add(data, data_one) - self.assertTrue(np.allclose(np.asarray(res[1]), data)) + np.testing.assert_allclose(np.asarray(res[1]), data, rtol=1e-05) def test_var_dict(self): @@ -127,15 +128,21 @@ class TestApiWhileLoop(unittest.TestCase): test_dict["test_key"], test_list[0], test_list_dict[0]["test_key"] ]) - self.assertTrue( - np.allclose(np.asarray(res[0]), - np.full(shape=(1), fill_value=10, dtype=np.int64))) - self.assertTrue( - np.allclose(np.asarray(res[1]), - np.full(shape=(2, 1), fill_value=10, dtype=np.int64))) - self.assertTrue( - np.allclose(np.asarray(res[2]), - np.full(shape=(1), fill_value=10, dtype=np.float32))) + np.testing.assert_allclose(np.asarray(res[0]), + np.full(shape=1, + fill_value=10, + dtype=np.int64), + rtol=1e-05) + np.testing.assert_allclose(np.asarray(res[1]), + np.full(shape=(2, 1), + fill_value=10, + dtype=np.int64), + rtol=1e-05) + np.testing.assert_allclose(np.asarray(res[2]), + np.full(shape=1, + fill_value=10, + dtype=np.float32), + rtol=1e-05) class TestApiWhileLoop_Nested(unittest.TestCase): @@ -196,7 +203,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): data_sums = np.add(data, data_sums) for j in range(2): data_sums = np.add(data, data_sums) - self.assertTrue(np.allclose(np.asarray(res[3]), data_sums)) + np.testing.assert_allclose(np.asarray(res[3]), data_sums, rtol=1e-05) class TestApiWhileLoop_Backward(unittest.TestCase): @@ -240,10 +247,8 @@ class TestApiWhileLoop_Backward(unittest.TestCase): 'x': feed_x }, fetch_list=[mean.name, i.grad_name]) - self.assertTrue(np.allclose(np.asarray(res[0]), data)) - self.assertTrue(np.allclose(np.asarray(res[1]), i_grad), - msg=" \nres = \n{} \n\n ans = \n{}".format( - res[1], i_grad)) + np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05) + np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05) def test_while_loop_backward2(self): @@ -283,13 +288,9 @@ class TestApiWhileLoop_Backward(unittest.TestCase): 'x': feed_x }, fetch_list=[mean.name, i.grad_name, x.grad_name]) - self.assertTrue(np.allclose(np.asarray(res[0]), data)) - self.assertTrue(np.allclose(np.asarray(res[1]), i_grad), - msg=" \nres = \n{} \n\n ans 
= \n{}".format( - res[1], i_grad)) - self.assertTrue(np.allclose(np.asarray(res[2]), x_grad), - msg=" \nres = \n{} \n\n ans = \n{}".format( - res[2], x_grad)) + np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05) + np.testing.assert_allclose(np.asarray(res[1]), i_grad, rtol=1e-05) + np.testing.assert_allclose(np.asarray(res[2]), x_grad, rtol=1e-05) class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): @@ -372,8 +373,8 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): 'x': feed_x }, fetch_list=[sum_result.name, x.grad_name]) - self.assertTrue(np.allclose(res[0], data_sum)) - self.assertTrue(np.allclose(res[1], x_grad)) + np.testing.assert_allclose(res[0], data_sum, rtol=1e-05) + np.testing.assert_allclose(res[1], x_grad, rtol=1e-05) class TestApiWhileLoopWithSwitchCase(unittest.TestCase): @@ -419,7 +420,7 @@ class TestApiWhileLoopWithSwitchCase(unittest.TestCase): res = exe.run(main_program, fetch_list=out) data = np.asarray([25]).astype('int64') - self.assertTrue(np.allclose(np.asarray(res[0]), data)) + np.testing.assert_allclose(np.asarray(res[0]), data, rtol=1e-05) class TestApiWhileLoop_Error(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_zeropad2d.py b/python/paddle/fluid/tests/unittests/test_zeropad2d.py index e2913097ae1..b880725ea8d 100644 --- a/python/paddle/fluid/tests/unittests/test_zeropad2d.py +++ b/python/paddle/fluid/tests/unittests/test_zeropad2d.py @@ -73,7 +73,7 @@ class TestZeroPad2dAPI(unittest.TestCase): x_tensor = to_tensor(x).astype(dtype) ret_res = zeropad2d(x_tensor, [pad, pad, pad, pad]).numpy() - self.assertTrue(np.allclose(expect_res, ret_res)) + np.testing.assert_allclose(expect_res, ret_res, rtol=1e-05) def test_support_dtypes(self): with paddle.fluid.framework._test_eager_guard(): @@ -91,7 +91,7 @@ class TestZeroPad2dAPI(unittest.TestCase): x_tensor = to_tensor(x) ret_res = zeropad2d(x_tensor, pad).numpy() - self.assertTrue(np.allclose(expect_res, ret_res)) + np.testing.assert_allclose(expect_res, ret_res, rtol=1e-05) def test_support_pad2(self): with paddle.fluid.framework._test_eager_guard(): @@ -109,7 +109,7 @@ class TestZeroPad2dAPI(unittest.TestCase): x_tensor = to_tensor(x) ret_res = zeropad2d(x_tensor, pad).numpy() - self.assertTrue(np.allclose(expect_res, ret_res)) + np.testing.assert_allclose(expect_res, ret_res, rtol=1e-05) def test_support_pad3(self): with paddle.fluid.framework._test_eager_guard(): @@ -128,7 +128,7 @@ class TestZeroPad2dAPI(unittest.TestCase): x_tensor = to_tensor(x) pad_tensor = to_tensor(pad, dtype='int32') ret_res = zeropad2d(x_tensor, pad_tensor).numpy() - self.assertTrue(np.allclose(expect_res, ret_res)) + np.testing.assert_allclose(expect_res, ret_res, rtol=1e-05) def test_support_pad4(self): with paddle.fluid.framework._test_eager_guard(): @@ -151,10 +151,10 @@ class TestZeroPad2DLayer(unittest.TestCase): [self.pad[0], self.pad[1]]]) def func_layer(self): - self.assertTrue( - np.allclose( - zeropad2d(to_tensor(self.x), self.pad).numpy(), - self.padLayer(to_tensor(self.x)))) + np.testing.assert_allclose(zeropad2d(to_tensor(self.x), + self.pad).numpy(), + self.padLayer(to_tensor(self.x)), + rtol=1e-05) def test_layer(self): with paddle.fluid.framework._test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py index 792a729d1fa..76c557f2ce8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py +++ 
b/python/paddle/fluid/tests/unittests/xpu/test_arg_max_op_xpu.py @@ -141,8 +141,9 @@ class TestArgMaxAPI(unittest.TestCase): tensor_input = paddle.to_tensor(numpy_input) numpy_output = np.argmax(numpy_input, axis=self.axis) paddle_output = paddle.argmax(tensor_input, axis=self.axis) - self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=1e-05) paddle.enable_static() for place in self.place: @@ -174,8 +175,9 @@ class TestArgMaxAPI_2(unittest.TestCase): paddle_output = paddle.argmax(tensor_input, axis=self.axis, keepdim=self.keep_dims) - self.assertEqual(np.allclose(numpy_output, paddle_output.numpy()), - True) + np.testing.assert_allclose(numpy_output, + paddle_output.numpy(), + rtol=1e-05) self.assertEqual(numpy_output.shape, paddle_output.numpy().shape) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py index 175bf152fe1..ab7cdd2ed26 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_value_op_xpu.py @@ -98,10 +98,7 @@ class TestAssignApi(unittest.TestCase): exe = fluid.Executor(self.place) [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x]) - np.testing.assert_allclose(fetched_x, - self.value, - err_msg="fetch_x=%s val=%s" % - (fetched_x, self.value)) + np.testing.assert_allclose(fetched_x, self.value) self.assertEqual(fetched_x.dtype, self.value.dtype) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py index 2175243ef1d..9fdc34fa3a2 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py @@ -203,7 +203,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): self.bias_np, self.mean_np, self.variance_np, self.momentum, self.epsilon, self.data_layout) - self.assertEqual(np.allclose(y_np_ref, y_np), True) + np.testing.assert_allclose(y_np_ref, y_np, rtol=1e-05) def test_train(self): y_grad_np = np.random.random_sample(self.shape).astype(self.dtype) @@ -280,8 +280,10 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): exe = paddle.static.Executor(self.place) outs = exe.run(program, feed=inputs, fetch_list=fetch_list) for id, name in enumerate(fetch_list): - self.assertEqual( - np.allclose(outputs[name], outs[id], atol=1e-4), True) + np.testing.assert_allclose(outputs[name], + outs[id], + rtol=1e-05, + atol=1e-4) class TestBatchNormOpUseGlobalStats(unittest.TestCase): @@ -313,7 +315,9 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): net2.training = False y1 = net1(x) y2 = net2(x) - self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True) + np.testing.assert_allclose(y1.numpy(), + y2.numpy(), + rtol=1e-05) class TestBatchNormOpUseGlobalStats1(TestBatchNormOpUseGlobalStats): ### test mode diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py index de0c7000e1d..8adb724a9ee 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mod_op_xpu.py @@ -111,7 +111,7 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): y = paddle.to_tensor(np_y) z = x % y z_expected = np.array([-0.9, 1.5, 1.3, -1.1]) - 
self.assertEqual(np.allclose(z_expected, z.numpy()), True) + np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05) np_x = np.random.rand(22, 128, 3).astype('int32') np_y = np.random.rand(22, 128, 3).astype('int32') @@ -128,7 +128,7 @@ class XPUTestElementwiseModOp(XPUOpTestWrapper): y = paddle.to_tensor(np_y, dtype="float16") z = x % y z_expected = np.array([0, 1, 1, -1]) - self.assertEqual(np.allclose(z_expected, z.numpy()), True) + np.testing.assert_allclose(z_expected, z.numpy(), rtol=1e-05) support_types = get_xpu_op_support_types('elementwise_mod') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py index 4a1601ed990..0d541fb1000 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py @@ -75,12 +75,7 @@ class XPUTestGaussianRandomOp(XPUOpTestWrapper): hist2, _ = np.histogram(data, range=(-3, 5)) hist2 = hist2.astype("float32") hist2 /= float(outs[0].size) - np.testing.assert_allclose(hist, - hist2, - rtol=0, - atol=0.01, - err_msg="hist: " + str(hist) + - " hist2: " + str(hist2)) + np.testing.assert_allclose(hist, hist2, rtol=0, atol=0.01) class TestMeanStdAreInt(TestGaussianRandomOp): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index f7be0e61d81..97ed1044cc9 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -214,11 +214,7 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper): expected_result = np.matmul(data1.reshape(1, 2), data2.reshape(2, 1)) - np.testing.assert_allclose(np_res, - expected_result, - atol=1e-3, - err_msg="two value is\ - {}\n{}, check diff!".format(np_res, expected_result)) + np.testing.assert_allclose(np_res, expected_result, atol=1e-3) def test_dygraph_without_out(self): device = fluid.XPUPlace(0) diff --git a/python/paddle/tests/test_async_read_write.py b/python/paddle/tests/test_async_read_write.py index 5336ca0da17..a235b786480 100644 --- a/python/paddle/tests/test_async_read_write.py +++ b/python/paddle/tests/test_async_read_write.py @@ -46,7 +46,7 @@ class TestAsyncRead(unittest.TestCase): array1 = paddle.gather(self.src, self.index) array2 = self.dst[:len(self.index)] - self.assertTrue(np.allclose(array1.numpy(), array2.numpy())) + np.testing.assert_allclose(array1.numpy(), array2.numpy(), rtol=1e-05) def func_test_async_read_success(self): offset = paddle.to_tensor(np.array([10, 20], dtype="int64"), @@ -64,15 +64,18 @@ class TestAsyncRead(unittest.TestCase): index_array1 = paddle.gather(self.src, self.index) count_numel = paddle.sum(count).numpy()[0] index_array2 = self.dst[count_numel:count_numel + len(self.index)] - self.assertTrue(np.allclose(index_array1.numpy(), index_array2.numpy())) + np.testing.assert_allclose(index_array1.numpy(), + index_array2.numpy(), + rtol=1e-05) # offset, count offset_a = paddle.gather(self.src, paddle.to_tensor(np.arange(10, 15))) offset_b = paddle.gather(self.src, paddle.to_tensor(np.arange(20, 30))) offset_array1 = paddle.concat([offset_a, offset_b], axis=0) offset_array2 = self.dst[:count_numel] - self.assertTrue( - np.allclose(offset_array1.numpy(), offset_array2.numpy())) + np.testing.assert_allclose(offset_array1.numpy(), + offset_array2.numpy(), + rtol=1e-05) def func_test_async_read_only_1dim(self): src = paddle.rand([40], 
dtype="float32").pin_memory() @@ -87,7 +90,7 @@ class TestAsyncRead(unittest.TestCase): self.empty) array1 = paddle.gather(src, self.index) array2 = dst[:len(self.index)] - self.assertTrue(np.allclose(array1.numpy(), array2.numpy())) + np.testing.assert_allclose(array1.numpy(), array2.numpy(), rtol=1e-05) def test_main(self): with _test_eager_guard(): @@ -127,7 +130,9 @@ class TestAsyncWrite(unittest.TestCase): offset_a = paddle.gather(self.dst, paddle.to_tensor(np.arange(0, 40))) offset_b = paddle.gather(self.dst, paddle.to_tensor(np.arange(60, 120))) offset_array = paddle.concat([offset_a, offset_b], axis=0) - self.assertTrue(np.allclose(self.src.numpy(), offset_array.numpy())) + np.testing.assert_allclose(self.src.numpy(), + offset_array.numpy(), + rtol=1e-05) def test_async_write_success(self): with _test_eager_guard(): diff --git a/python/paddle/tests/test_dlpack.py b/python/paddle/tests/test_dlpack.py index 076fe5545db..353dc7ebfef 100644 --- a/python/paddle/tests/test_dlpack.py +++ b/python/paddle/tests/test_dlpack.py @@ -48,7 +48,7 @@ class TestDLPack(unittest.TestCase): # TODO: There may be a reference count problem of to_dlpack. dlpack = paddle.utils.dlpack.to_dlpack(t) out = paddle.utils.dlpack.from_dlpack(dlpack) - self.assertTrue(np.allclose(numpy_data, out.numpy())) + np.testing.assert_allclose(numpy_data, out.numpy(), rtol=1e-05) def test_dlpack_tensor_larger_than_2dim(self): with _test_eager_guard(): @@ -98,7 +98,7 @@ class TestDLPack(unittest.TestCase): dlpack = paddle.utils.dlpack.to_dlpack(x) o = paddle.utils.dlpack.from_dlpack(dlpack) self.assertEqual(x.dtype, o.dtype) - self.assertTrue(np.allclose(x.numpy(), o.numpy())) + np.testing.assert_allclose(x.numpy(), o.numpy(), rtol=1e-05) complex_dtypes = ["complex64", "complex128"] for dtype in complex_dtypes: @@ -108,7 +108,7 @@ class TestDLPack(unittest.TestCase): dlpack = paddle.utils.dlpack.to_dlpack(x) o = paddle.utils.dlpack.from_dlpack(dlpack) self.assertEqual(x.dtype, o.dtype) - self.assertTrue(np.allclose(x.numpy(), o.numpy())) + np.testing.assert_allclose(x.numpy(), o.numpy(), rtol=1e-05) def test_dlpack_dtype_conversion(self): with _test_eager_guard(): -- GitLab