From 72c711bbddafee8c018abdd0414fb6a420b30e04 Mon Sep 17 00:00:00 2001 From: GGBond8488 <33050871+GGBond8488@users.noreply.github.com> Date: Tue, 21 Mar 2023 17:27:13 +0800 Subject: [PATCH] =?UTF-8?q?=E3=80=90fluid=20clean=E3=80=91remove=20fluid.d?= =?UTF-8?q?ata=20(#50699)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * remove fluid.data * fix typo * fix some unittest errors * fix conflicts * fix sample code error * fix sample code error * fix sample code error * fix sample code error * fix xpu test error * fix xpu test error * Delete ps_pb2.py * fix test error * fix typo * fix sample code error * fix comments * fix test norm op data * fix sample code error * fix conflicts --- paddle/fluid/framework/reader.h | 2 - python/paddle/fluid/__init__.py | 3 - python/paddle/fluid/contrib/layers/nn.py | 58 +++--- python/paddle/fluid/data.py | 126 ------------- python/paddle/fluid/data_feeder.py | 18 +- python/paddle/fluid/executor.py | 2 +- python/paddle/fluid/framework.py | 4 +- python/paddle/fluid/io.py | 4 +- python/paddle/fluid/layers/control_flow.py | 14 +- python/paddle/fluid/layers/math_op_patch.py | 6 +- python/paddle/fluid/layers/nn.py | 6 +- python/paddle/fluid/nets.py | 14 +- python/paddle/fluid/optimizer.py | 28 +-- python/paddle/fluid/profiler.py | 5 +- python/paddle/fluid/reader.py | 26 +-- python/paddle/fluid/tests/test_detection.py | 22 ++- .../tests/unittests/asp/asp_pruning_base.py | 6 +- .../asp/test_asp_customized_pruning.py | 6 +- .../unittests/asp/test_asp_optimize_static.py | 6 +- .../unittests/asp/test_asp_pruning_static.py | 6 +- .../tests/unittests/asp/test_asp_save_load.py | 6 +- .../tests/unittests/auto_checkpoint_utils.py | 8 +- .../fleet/static_model_parallel_by_col.py | 2 +- .../fleet/static_model_parallel_by_row.py | 2 +- .../fleet/static_model_parallel_embedding.py | 2 +- .../collective/fleet/test_fleet_checkpoint.py | 6 +- .../test_distribution_categorical.py | 20 +- .../dygraph_to_static/test_static_analysis.py | 2 +- .../ipu/test_fill_any_like_op_ipu.py | 2 +- .../test_mkldnn_cpu_bfloat16_pass.py | 2 +- .../test_mkldnn_elt_act_fuse_pass.py | 4 +- .../test_mkldnn_matmul_op_output_fuse_pass.py | 12 +- ...n_reshape_transpose_matmul_v2_fuse_pass.py | 2 +- .../ir/inference/test_trt_activation_pass.py | 2 +- .../ir/inference/test_trt_conv3d_op.py | 4 +- .../inference/test_trt_conv3d_transpose_op.py | 4 +- .../ir/inference/test_trt_conv_pass.py | 6 +- .../test_trt_conv_quant_dequant_pass.py | 18 +- .../ir/inference/test_trt_deformable_conv.py | 6 +- .../ir/inference/test_trt_dynamic_shape.py | 2 +- .../ir/inference/test_trt_elementwise_op.py | 4 +- .../ir/inference/test_trt_fc_fuse_pass.py | 24 ++- .../test_trt_fc_fuse_quant_dequant_pass.py | 18 +- .../ir/inference/test_trt_flatten_op.py | 4 +- .../ir/inference/test_trt_gather_nd_op.py | 14 +- .../ir/inference/test_trt_gather_op.py | 14 +- .../ir/inference/test_trt_inspector.py | 4 +- .../ir/inference/test_trt_instance_norm_op.py | 3 +- .../unittests/ir/inference/test_trt_matmul.py | 12 +- .../test_trt_matmul_quant_dequant.py | 18 +- .../inference/test_trt_multiclass_nms3_op.py | 4 +- .../inference/test_trt_nearest_interp_op.py | 2 +- .../test_trt_nearest_interp_v2_op.py | 3 +- .../unittests/ir/inference/test_trt_pad_op.py | 2 +- .../ir/inference/test_trt_pool3d_op.py | 6 +- .../ir/inference/test_trt_pool_op.py | 2 +- .../ir/inference/test_trt_reduce_sum_op.py | 4 +- .../ir/inference/test_trt_reshape_op.py | 8 +- .../ir/inference/test_trt_scale_op.py | 6 +-
.../test_trt_shuffle_channel_detect_pass.py | 2 +- .../test_trt_slice_dynamic_plugin.py | 4 +- .../ir/inference/test_trt_slice_plugin.py | 12 +- .../ir/inference/test_trt_subgraph_pass.py | 30 +-- .../ir/inference/test_trt_tile_op.py | 14 +- ..._trt_transpose_flatten_concat_fuse_pass.py | 4 +- .../inference/test_trt_tuned_dynamic_shape.py | 2 +- .../ir/inference/test_trt_yolo_box_op.py | 18 +- .../unittests/ir/test_ir_fc_fuse_pass.py | 2 +- .../unittests/ir/test_ir_fusion_group_pass.py | 12 +- .../ir/test_ir_skip_layernorm_pass.py | 4 +- .../unittests/mlu/test_batch_norm_op_mlu.py | 4 +- .../mlu/test_batch_norm_op_mlu_v2.py | 4 +- .../tests/unittests/mlu/test_bce_loss_mlu.py | 12 +- .../mlu/test_bce_with_logits_loss_mlu.py | 8 +- .../unittests/mlu/test_dropout_op_mlu.py | 2 +- .../mlu/test_elementwise_add_op_mlu.py | 8 +- .../mlu/test_fill_constant_op_mlu.py | 8 +- .../tests/unittests/mlu/test_gather_op_mlu.py | 14 +- .../unittests/mlu/test_hard_sigmoid_op_mlu.py | 6 +- .../unittests/mlu/test_log_softmax_op_mlu.py | 8 +- .../mlu/test_masked_select_op_mlu.py | 10 +- .../unittests/mlu/test_matmul_v2_op_mlu.py | 4 +- .../unittests/mlu/test_meshgrid_op_mlu.py | 12 +- .../unittests/mlu/test_scatter_op_mlu.py | 6 +- .../tests/unittests/mlu/test_size_op_mlu.py | 4 +- .../unittests/mlu/test_softmax_op_mlu.py | 6 +- .../unittests/mlu/test_transpose_op_mlu.py | 8 +- .../unittests/mlu/test_tril_triu_op_mlu.py | 6 +- .../unittests/npu/test_batch_norm_op_npu.py | 2 +- .../tests/unittests/npu/test_bce_loss_npu.py | 12 +- .../tests/unittests/npu/test_clip_op_npu.py | 10 +- .../unittests/npu/test_dropout_op_npu.py | 2 +- .../npu/test_elementwise_add_op_npu.py | 8 +- .../npu/test_elementwise_mod_op_npu.py | 4 +- .../tests/unittests/npu/test_gather_op_npu.py | 4 +- .../unittests/npu/test_group_norm_op_npu.py | 2 +- .../unittests/npu/test_hard_sigmoid_op_npu.py | 6 +- .../unittests/npu/test_index_sample_op_npu.py | 4 +- .../npu/test_instance_norm_op_npu.py | 4 +- .../unittests/npu/test_kldiv_loss_op_npu.py | 4 +- .../unittests/npu/test_log_softmax_op_npu.py | 4 +- .../npu/test_masked_select_op_npu.py | 10 +- .../unittests/npu/test_matmulv2_op_npu.py | 4 +- .../unittests/npu/test_meshgrid_op_npu.py | 12 +- .../unittests/npu/test_multinomial_op_npu.py | 2 +- .../tests/unittests/npu/test_norm_op_npu.py | 2 +- .../tests/unittests/npu/test_pad3d_op_npu.py | 10 +- .../tests/unittests/npu/test_pad_op_npu.py | 2 +- .../unittests/npu/test_run_program_op_npu.py | 2 +- .../tests/unittests/npu/test_size_op_npu.py | 4 +- .../npu/test_take_along_axis_op_npu.py | 4 +- .../unittests/npu/test_tril_triu_op_npu.py | 6 +- .../npu/test_update_loss_scaling_op_npu.py | 24 +-- .../tests/unittests/npu/test_where_op_npu.py | 6 +- .../unittests/rnn/test_rnn_cells_static.py | 20 +- .../unittests/rnn/test_rnn_nets_static.py | 32 ++-- .../sequence/test_sequence_pad_op.py | 5 +- .../static_model_parallel_fused_attention.py | 2 +- ...static_model_parallel_fused_feedforward.py | 2 +- ..._model_parallel_fused_multi_transformer.py | 2 +- .../tests/unittests/test_activation_op.py | 149 +++++++-------- .../fluid/tests/unittests/test_adam_op.py | 6 +- .../test_adam_optimizer_fp32_fp64.py | 4 +- .../fluid/tests/unittests/test_adamax_api.py | 2 +- .../fluid/tests/unittests/test_adamw_op.py | 10 +- .../unittests/test_adaptive_avg_pool1d.py | 4 +- .../unittests/test_adaptive_avg_pool2d.py | 8 +- .../unittests/test_adaptive_avg_pool3d.py | 4 +- .../unittests/test_adaptive_max_pool1d.py | 2 +- .../unittests/test_adaptive_max_pool2d.py | 8 +- 
.../unittests/test_adaptive_max_pool3d.py | 4 +- .../unittests/test_add_reader_dependency.py | 7 +- .../unittests/test_affine_grid_function.py | 4 +- .../tests/unittests/test_allclose_layer.py | 4 +- .../fluid/tests/unittests/test_allclose_op.py | 16 +- .../fluid/tests/unittests/test_argsort_op.py | 2 +- .../tests/unittests/test_ascend_trigger.py | 8 +- .../fluid/tests/unittests/test_assign_op.py | 4 +- .../tests/unittests/test_assign_pos_op.py | 4 +- .../fluid/tests/unittests/test_atan2_op.py | 8 +- .../fluid/tests/unittests/test_auc_op.py | 14 +- .../fluid/tests/unittests/test_backward.py | 28 ++- .../tests/unittests/test_batch_norm_op.py | 6 +- .../tests/unittests/test_batch_norm_op_v2.py | 8 +- .../fluid/tests/unittests/test_bce_loss.py | 12 +- .../unittests/test_bce_with_logits_loss.py | 8 +- .../tests/unittests/test_bicubic_interp_op.py | 50 +++-- .../unittests/test_bicubic_interp_v2_op.py | 86 ++++++--- .../unittests/test_bilateral_slice_op.py | 6 +- .../tests/unittests/test_bilinear_api.py | 4 +- .../test_bilinear_tensor_product_op.py | 8 +- .../fluid/tests/unittests/test_bincount_op.py | 6 +- .../tests/unittests/test_calc_gradient.py | 2 +- .../paddle/fluid/tests/unittests/test_case.py | 8 +- .../tests/unittests/test_channel_shuffle.py | 8 +- .../fluid/tests/unittests/test_cholesky_op.py | 4 +- .../tests/unittests/test_cholesky_solve_op.py | 28 +-- .../fluid/tests/unittests/test_chunk_op.py | 20 +- .../fluid/tests/unittests/test_clip_op.py | 12 +- .../fluid/tests/unittests/test_compare_op.py | 8 +- .../fluid/tests/unittests/test_concat_op.py | 18 +- .../paddle/fluid/tests/unittests/test_cond.py | 30 +-- .../tests/unittests/test_conv1d_layer.py | 8 +- .../unittests/test_conv1d_transpose_layer.py | 8 +- .../tests/unittests/test_conv2d_layer.py | 12 +- .../unittests/test_conv2d_transpose_layer.py | 12 +- .../tests/unittests/test_conv3d_layer.py | 12 +- .../unittests/test_conv3d_transpose_layer.py | 12 +- .../paddle/fluid/tests/unittests/test_corr.py | 2 +- .../unittests/test_cosine_similarity_api.py | 4 +- .../tests/unittests/test_count_nonzero_api.py | 4 +- .../fluid/tests/unittests/test_crop_op.py | 7 +- .../tests/unittests/test_crop_tensor_op.py | 13 +- .../unittests/test_cross_entropy_loss.py | 178 ++++++++++++------ .../fluid/tests/unittests/test_cross_op.py | 4 +- .../fluid/tests/unittests/test_cumprod_op.py | 2 +- .../paddle/fluid/tests/unittests/test_data.py | 31 +-- .../tests/unittests/test_data_norm_op.py | 2 +- .../unittests/test_dataloader_early_reset.py | 2 +- .../unittests/test_dataloader_keep_order.py | 4 +- .../unittests/test_dataloader_unkeep_order.py | 4 +- .../fluid/tests/unittests/test_dataset.py | 2 +- .../fluid/tests/unittests/test_deg2rad.py | 4 +- .../unittests/test_deprecated_decorator.py | 21 --- .../tests/unittests/test_determinant_op.py | 4 +- .../fluid/tests/unittests/test_diag_embed.py | 5 +- .../fluid/tests/unittests/test_diagonal_op.py | 2 +- .../fluid/tests/unittests/test_diff_op.py | 6 +- .../fluid/tests/unittests/test_dist_op.py | 8 +- .../unittests/test_dist_sparse_load_ps0.py | 4 +- .../test_dist_sparse_tensor_load_sgd.py | 4 +- .../fluid/tests/unittests/test_dropout_op.py | 72 +++++-- .../test_dynamic_rnn_stop_gradient.py | 4 +- .../fluid/tests/unittests/test_eig_op.py | 4 +- .../unittests/test_elementwise_add_op.py | 8 +- .../unittests/test_elementwise_div_op.py | 6 +- .../unittests/test_elementwise_floordiv_op.py | 4 +- .../unittests/test_elementwise_mod_op.py | 4 +- .../unittests/test_elementwise_sub_op.py | 8 +- 
.../paddle/fluid/tests/unittests/test_ema.py | 4 +- .../test_embedding_id_stop_gradient.py | 4 +- .../fluid/tests/unittests/test_empty_op.py | 6 +- .../fluid/tests/unittests/test_erfinv_op.py | 2 +- .../unittests/test_executor_check_feed.py | 6 +- .../test_executor_feed_non_tensor.py | 6 +- .../test_feed_data_check_shape_type.py | 12 +- .../unittests/test_fetch_lod_tensor_array.py | 8 +- .../tests/unittests/test_fill_constant_op.py | 8 +- .../unittests/test_fleet_pyramid_hash.py | 4 +- .../paddle/fluid/tests/unittests/test_flip.py | 12 +- .../fluid/tests/unittests/test_frac_api.py | 4 +- .../fluid/tests/unittests/test_frexp_api.py | 2 +- .../tests/unittests/test_full_like_op.py | 4 +- .../fluid/tests/unittests/test_full_op.py | 8 +- .../tests/unittests/test_functional_conv2d.py | 28 +-- .../test_functional_conv2d_transpose.py | 28 +-- .../tests/unittests/test_functional_conv3d.py | 28 +-- .../test_functional_conv3d_transpose.py | 28 +-- .../test_fuse_elewise_add_act_pass.py | 4 +- .../test_fused_multi_transformer_op.py | 6 +- .../tests/unittests/test_gather_nd_op.py | 6 +- .../fluid/tests/unittests/test_gather_op.py | 28 +-- .../unittests/test_gaussian_random_op.py | 4 +- .../paddle/fluid/tests/unittests/test_gcd.py | 8 +- .../test_get_tensor_from_selected_rows_op.py | 4 +- .../tests/unittests/test_gradient_clip.py | 12 +- .../unittests/test_grid_sample_function.py | 6 +- .../tests/unittests/test_group_norm_op.py | 12 +- .../tests/unittests/test_gumbel_softmax_op.py | 4 +- .../tests/unittests/test_histogram_op.py | 8 +- .../fluid/tests/unittests/test_hsigmoid_op.py | 8 +- .../tests/unittests/test_identity_loss_op.py | 4 +- .../fluid/tests/unittests/test_identity_op.py | 2 +- .../test_imperative_load_static_param.py | 36 ++-- ...perative_star_gan_with_gradient_penalty.py | 6 +- .../tests/unittests/test_index_sample_op.py | 4 +- .../tests/unittests/test_inference_api.py | 4 +- .../fluid/tests/unittests/test_initializer.py | 4 +- .../unittests/test_inplace_addto_strategy.py | 2 +- .../unittests/test_instance_norm_op_v2.py | 8 +- .../fluid/tests/unittests/test_inverse_op.py | 20 +- .../tests/unittests/test_io_save_load.py | 4 +- .../fluid/tests/unittests/test_isclose_op.py | 24 ++- .../tests/unittests/test_isfinite_v2_op.py | 2 +- .../tests/unittests/test_kldiv_loss_op.py | 4 +- .../fluid/tests/unittests/test_kron_op.py | 4 +- .../fluid/tests/unittests/test_l1_loss.py | 12 +- .../unittests/test_label_smooth_functional.py | 4 +- .../tests/unittests/test_layer_norm_op_v2.py | 8 +- .../fluid/tests/unittests/test_layers.py | 24 ++- .../paddle/fluid/tests/unittests/test_lcm.py | 8 +- .../fluid/tests/unittests/test_lerp_op.py | 4 +- .../tests/unittests/test_linalg_lstsq_op.py | 4 +- .../tests/unittests/test_linalg_pinv_op.py | 2 +- .../tests/unittests/test_linear_interp_op.py | 10 +- .../unittests/test_linear_interp_v2_op.py | 8 +- .../fluid/tests/unittests/test_linspace.py | 8 +- .../test_load_state_dict_from_old_format.py | 6 +- .../fluid/tests/unittests/test_log_softmax.py | 8 +- .../fluid/tests/unittests/test_logit_op.py | 6 +- .../fluid/tests/unittests/test_logsumexp.py | 4 +- .../fluid/tests/unittests/test_lookahead.py | 4 +- .../tests/unittests/test_lookup_table_op.py | 11 +- .../unittests/test_lookup_table_v2_op.py | 10 +- .../fluid/tests/unittests/test_lrn_op.py | 22 ++- .../fluid/tests/unittests/test_lu_op.py | 2 +- .../tests/unittests/test_lu_unpack_op.py | 4 +- .../unittests/test_margin_rank_loss_op.py | 8 +- .../tests/unittests/test_masked_select_op.py | 10 +- 
.../fluid/tests/unittests/test_matmul_op.py | 22 ++- .../tests/unittests/test_matmul_v2_op.py | 8 +- .../tests/unittests/test_matrix_nms_op.py | 5 +- .../tests/unittests/test_matrix_power_op.py | 34 +++- .../tests/unittests/test_matrix_rank_op.py | 8 +- .../unittests/test_max_min_amax_amin_op.py | 4 +- .../fluid/tests/unittests/test_maxout_op.py | 6 +- .../fluid/tests/unittests/test_mean_op.py | 6 +- .../fluid/tests/unittests/test_median.py | 2 +- .../fluid/tests/unittests/test_meshgrid_op.py | 12 +- .../tests/unittests/test_modelaverage.py | 4 +- .../fluid/tests/unittests/test_mse_loss.py | 28 ++- .../tests/unittests/test_multinomial_op.py | 2 +- .../tests/unittests/test_multiplex_op.py | 18 +- ...cess_dataloader_iterable_dataset_static.py | 6 +- .../test_multiprocess_dataloader_static.py | 14 +- .../test_multiprocess_reader_exception.py | 4 +- .../tests/unittests/test_nan_to_num_op.py | 2 +- .../fluid/tests/unittests/test_nanmean_api.py | 4 +- .../fluid/tests/unittests/test_nanmedian.py | 6 +- .../fluid/tests/unittests/test_nansum_api.py | 4 +- .../fluid/tests/unittests/test_neg_op.py | 4 +- .../fluid/tests/unittests/test_nll_loss.py | 160 +++++++++++----- .../fluid/tests/unittests/test_norm_all.py | 18 +- .../fluid/tests/unittests/test_norm_op.py | 2 +- .../fluid/tests/unittests/test_normal.py | 14 +- .../fluid/tests/unittests/test_normalize.py | 4 +- .../tests/unittests/test_npair_loss_op.py | 14 +- .../tests/unittests/test_number_count_op.py | 2 +- .../fluid/tests/unittests/test_numel_op.py | 4 +- .../fluid/tests/unittests/test_ones_like.py | 4 +- .../tests/unittests/test_op_name_conflict.py | 4 +- .../test_optimizer_in_control_flow.py | 6 +- .../fluid/tests/unittests/test_pad3d_op.py | 8 +- .../fluid/tests/unittests/test_pad_op.py | 3 +- .../test_paddle_fluid_modelaverage.py | 4 +- .../unittests/test_paddle_save_load_binary.py | 4 +- .../tests/unittests/test_pairwise_distance.py | 4 +- .../tests/unittests/test_pixel_shuffle_op.py | 12 +- .../tests/unittests/test_pixel_unshuffle.py | 8 +- .../fluid/tests/unittests/test_pool1d_api.py | 8 +- .../fluid/tests/unittests/test_pool2d_api.py | 4 +- .../fluid/tests/unittests/test_pool3d_api.py | 4 +- .../fluid/tests/unittests/test_prelu_op.py | 18 +- .../fluid/tests/unittests/test_prod_op.py | 6 +- .../fluid/tests/unittests/test_program.py | 2 +- .../tests/unittests/test_put_along_axis_op.py | 6 +- .../tests/unittests/test_pyramid_hash_op.py | 5 +- .../fluid/tests/unittests/test_qr_op.py | 2 +- .../fluid/tests/unittests/test_rad2deg.py | 4 +- .../fluid/tests/unittests/test_rand_op.py | 6 +- .../tests/unittests/test_randint_like.py | 24 +-- .../fluid/tests/unittests/test_reduce_op.py | 6 +- .../tests/unittests/test_rnn_cell_api.py | 14 +- .../tests/unittests/test_rnn_decode_api.py | 16 +- .../fluid/tests/unittests/test_rot90_op.py | 64 +++++-- .../fluid/tests/unittests/test_row_conv_op.py | 2 +- .../fluid/tests/unittests/test_rrelu_op.py | 18 +- .../tests/unittests/test_run_program_op.py | 2 +- .../test_scaled_dot_product_attention.py | 25 +-- .../fluid/tests/unittests/test_scatter_op.py | 10 +- .../fluid/tests/unittests/test_selu_op.py | 10 +- .../unittests/test_sigmoid_focal_loss.py | 6 +- .../fluid/tests/unittests/test_size_op.py | 4 +- .../fluid/tests/unittests/test_slice_op.py | 12 +- .../tests/unittests/test_smooth_l1_loss.py | 32 +++- .../fluid/tests/unittests/test_softmax2d.py | 4 +- .../unittests/test_softmax_mask_fuse_op.py | 6 +- ...est_softmax_mask_fuse_upper_triangle_op.py | 2 +- .../fluid/tests/unittests/test_softmax_op.py | 6 +- 
.../fluid/tests/unittests/test_solve_op.py | 44 ++--- .../fluid/tests/unittests/test_sort_op.py | 8 +- .../fluid/tests/unittests/test_split_op.py | 4 +- .../fluid/tests/unittests/test_splits_api.py | 4 +- .../tests/unittests/test_square_error_cost.py | 8 +- .../tests/unittests/test_static_save_load.py | 2 +- .../fluid/tests/unittests/test_std_layer.py | 4 +- .../fluid/tests/unittests/test_sum_op.py | 30 +-- .../fluid/tests/unittests/test_svd_op.py | 2 +- .../fluid/tests/unittests/test_switch_case.py | 8 +- .../paddle/fluid/tests/unittests/test_take.py | 14 +- .../unittests/test_take_along_axis_op.py | 4 +- .../fluid/tests/unittests/test_trace_op.py | 4 +- .../tests/unittests/test_transpose_op.py | 12 +- .../unittests/test_triangular_solve_op.py | 28 +-- .../tests/unittests/test_tril_triu_op.py | 12 +- .../fluid/tests/unittests/test_trunc_op.py | 4 +- .../fluid/tests/unittests/test_unbind_op.py | 10 +- .../unittests/test_uniform_random_bf16_op.py | 4 +- .../tests/unittests/test_uniform_random_op.py | 4 +- .../fluid/tests/unittests/test_unique.py | 11 +- .../unittests/test_unique_consecutive_op.py | 6 +- .../unittests/test_unique_with_counts.py | 3 +- .../fluid/tests/unittests/test_unpool1d_op.py | 2 +- .../fluid/tests/unittests/test_unpool3d_op.py | 2 +- .../fluid/tests/unittests/test_unpool_op.py | 2 +- .../fluid/tests/unittests/test_unzip_op.py | 4 +- .../unittests/test_update_loss_scaling_op.py | 29 +-- .../tests/unittests/test_variance_layer.py | 4 +- .../tests/unittests/test_viterbi_decode_op.py | 8 +- .../fluid/tests/unittests/test_warprnnt_op.py | 21 ++- .../tests/unittests/test_while_loop_op.py | 26 +-- .../tests/unittests/test_zeros_like_op.py | 4 +- .../unittests/xpu/test_activation_op_xpu.py | 6 +- .../tests/unittests/xpu/test_adamw_op_xpu.py | 10 +- .../tests/unittests/xpu/test_assign_op_xpu.py | 4 - .../unittests/xpu/test_batch_norm_op_xpu.py | 10 +- .../xpu/test_bilinear_interp_op_xpu.py | 10 +- .../tests/unittests/xpu/test_clip_op_xpu.py | 12 +- .../unittests/xpu/test_diagonal_op_xpu.py | 2 +- .../xpu/test_elementwise_add_op_xpu.py | 8 +- .../xpu/test_elementwise_add_op_xpu_kp.py | 8 +- .../xpu/test_elementwise_div_op_xpu.py | 2 +- .../xpu/test_gaussian_random_op_xpu.py | 4 +- .../unittests/xpu/test_index_sample_op_xpu.py | 4 +- .../unittests/xpu/test_kldiv_loss_op_xpu.py | 4 +- .../xpu/test_masked_select_op_xpu.py | 10 +- .../tests/unittests/xpu/test_matmul_op_xpu.py | 20 +- .../xpu/test_nearest_interp_op_xpu.py | 2 +- .../tests/unittests/xpu/test_pad3d_op_xpu.py | 12 +- .../tests/unittests/xpu/test_prelu_op_xpu.py | 6 +- .../tests/unittests/xpu/test_prod_op_xpu.py | 6 +- .../tests/unittests/xpu/test_sum_op_xpu.py | 30 +-- .../unittests/xpu/test_tril_triu_op_xpu.py | 5 +- .../tests/unittests/xpu/test_unbind_op_xpu.py | 10 +- .../xpu/test_update_loss_scaling_op_xpu.py | 32 ++-- .../unittests/xpu/test_warpctc_op_xpu.py | 11 +- .../tests/unittests/xpu/test_where_op_xpu.py | 10 +- .../jit/dy2static/program_translator.py | 2 +- python/paddle/nn/clip.py | 2 +- python/paddle/reader/decorator.py | 4 +- python/paddle/static/io.py | 2 +- python/paddle/tensor/attribute.py | 2 +- python/paddle/tensor/random.py | 2 +- python/paddle/tests/test_metrics.py | 6 +- 404 files changed, 2401 insertions(+), 1822 deletions(-) delete mode 100644 python/paddle/fluid/data.py diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index b2c48c5877d..9215d701510 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -92,8 +92,6 @@ class ReaderBase { 
std::vector var_types_; // Whether to check the shape and dtype of fed variables. - // For Backward compatibility, variables created by old API fluid.layers.data - // doesn't check shape but fluid.data checks. std::vector need_check_feed_; private: diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index db3fefcbace..51ed68d6cf0 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -46,8 +46,6 @@ from .data_feed_desc import * from . import dataset from .dataset import * -from .data import * - from . import trainer_desc from . import io @@ -117,7 +115,6 @@ __all__ = ( 'initializer', 'layers', 'contrib', - 'data', 'dygraph', 'enable_dygraph', 'disable_dygraph', diff --git a/python/paddle/fluid/contrib/layers/nn.py b/python/paddle/fluid/contrib/layers/nn.py index 5763b895bec..92c95f8eac2 100644 --- a/python/paddle/fluid/contrib/layers/nn.py +++ b/python/paddle/fluid/contrib/layers/nn.py @@ -567,8 +567,9 @@ def partial_concat(input, start_index=0, length=-1): Examples: .. code-block:: python import paddle.fluid as fluid - x = fluid.data(name="x", shape=[None,3], dtype="float32") - y = fluid.data(name="y", shape=[None,3], dtype="float32") + import paddle + x = paddle.randn(name="x", shape=[1,3], dtype="float32") + y = paddle.randn(name="y", shape=[1,3], dtype="float32") concat = fluid.contrib.layers.partial_concat( [x, y], start_index=0, length=2) """ @@ -629,9 +630,12 @@ def partial_sum(input, start_index=0, length=-1): import paddle.fluid.layers as layers import paddle.fluid as fluid import numpy as np - x = fluid.data(name="x", shape=[None, 3], dtype="float32") - y = fluid.data(name="y", shape=[None, 3], dtype="float32") - sum = layers.partial_sum([x,y], start_index=0, length=2) + import paddle + paddle.enable_static() + + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name="y", shape=[2, 3], dtype="float32") + sum = fluid.contrib.layers.partial_sum([x,y], start_index=0, length=2) place = fluid.CPUPlace() exe = fluid.Executor(place) xx = np.array([1,2,3,4,5,6]).reshape((2,3)).astype("float32") @@ -898,7 +902,7 @@ def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'): import paddle.fluid as fluid import numpy as np paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1) + x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1) tree_info = [[0,0,0,1,2], [0,1,0,3,4],[0,1,0,5,6], [0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]] @@ -1007,7 +1011,7 @@ def tdm_sampler( import paddle.fluid as fluid import numpy as np paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1) + x = paddle.static.data(name="x", shape=[None, 1], dtype="int32", lod_level=1) travel_list = [[1, 3], [1, 4], [2, 5], [2, 6]] # leaf node's travel path, shape(leaf_node_num, layer_num) layer_list_flat = [[1], [2], [3], [4], [5], [6]] # shape(node_nums, 1) @@ -1197,18 +1201,17 @@ def rank_attention( Examples: .. 
code-block:: python import paddle.fluid as fluid - import numpy as np + import paddle + paddle.enable_static() - input = fluid.data(name="input", shape=[None, 2], dtype="float32") - rank_offset = fluid.data(name="rank_offset", shape=[None, 7], dtype="int32") + input = paddle.static.data(name="input", shape=[None, 2], dtype="float32") + rank_offset = paddle.static.data(name="rank_offset", shape=[None, 7], dtype="int32") out = fluid.contrib.layers.rank_attention(input=input, rank_offset=rank_offset, rank_param_shape=[18,3], rank_param_attr= - fluid.ParamAttr(learning_rate=1.0, - name="ubm_rank_param.w_0", - initializer= - fluid.initializer.Xavier(uniform=False)), + paddle.ParamAttr(learning_rate=1.0, + name="ubm_rank_param.w_0"), max_rank=3, max_size=0) """ @@ -1259,22 +1262,21 @@ def batch_fc(input, param_size, param_attr, bias_size, bias_attr, act=None): Examples: .. code-block:: python import paddle.fluid as fluid + import paddle - input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32") + paddle.enable_static() + + input = paddle.static.data(name="input", shape=[16, 2, 3], dtype="float32") out = fluid.contrib.layers.batch_fc(input=input, param_size=[16, 3, 10], param_attr= - fluid.ParamAttr(learning_rate=1.0, - name="w_0", - initializer= - fluid.initializer.Xavier(uniform=False)), + paddle.ParamAttr(learning_rate=1.0, + name="w_0"), bias_size=[16, 10], bias_attr= - fluid.ParamAttr(learning_rate=1.0, - name="b_0", - initializer= - fluid.initializer.Xavier(uniform=False)), - act="relu") + paddle.ParamAttr(learning_rate=1.0, + name="b_0"), + act="relu") """ helper = LayerHelper("batch_fc", **locals()) @@ -1380,10 +1382,12 @@ def bilateral_slice(x, guide, grid, has_offset, name=None): .. code-block:: python import paddle.fluid as fluid + import paddle + paddle.enable_static() - x = fluid.data(name='x', shape=[None, 3, 101, 60], dtype='float32') - guide = fluid.data(name='guide', shape=[None, 101, 60], dtype='float32') - grid = fluid.data(name='grid', shape=[None, 12, 8, 10, 6], dtype='float32') + x = paddle.randn(name='x', shape=[1, 3, 101, 60], dtype='float32') + guide = paddle.randn(name='guide', shape=[1, 101, 60], dtype='float32') + grid = paddle.randn(name='grid', shape=[1, 12, 8, 10, 6], dtype='float32') # without offset output = fluid.contrib.bilateral_slice(x, guide, grid, has_offset=False) diff --git a/python/paddle/fluid/data.py b/python/paddle/fluid/data.py deleted file mode 100644 index 00173a29c28..00000000000 --- a/python/paddle/fluid/data.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy as np - -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.data_feeder import check_dtype, check_type -from ..utils import deprecated -from paddle.fluid.framework import static_only - -__all__ = ['data'] - - -@static_only -@deprecated(since="2.0.0", update_to="paddle.static.data") -def data(name, shape, dtype='float32', lod_level=0): - """ - **Data Layer** - - This function creates a variable on the global block. The global variable - can be accessed by all the following operators in the graph. The variable - is a placeholder that could be fed with input, such as Executor can feed - input into the variable. - - Note: - `paddle.fluid.layers.data` is deprecated. It will be removed in a - future version. Please use this `paddle.fluid.data`. - - The `paddle.fluid.layers.data` set shape and dtype at compile time but - does NOT check the shape or the dtype of fed data, this - `paddle.fluid.data` checks the shape and the dtype of data fed by - Executor or ParallelExecutor during run time. - - To feed variable size inputs, users can set None or -1 on the variable - dimension when using :code:`paddle.fluid.data`, or feed variable size - inputs directly to :code:`paddle.fluid.layers.data` and PaddlePaddle - will fit the size accordingly. - - The default :code:`stop_gradient` attribute of the Variable created by - this API is true, which means the gradient won't be passed backward - through the data Variable. Set :code:`var.stop_gradient = False` If - user would like to pass backward gradient. - - Args: - name (str): The name/alias of the variable, see :ref:`api_guide_Name` - for more details. - shape (list|tuple): List|Tuple of integers declaring the shape. You can - set "None" or -1 at a dimension to indicate the dimension can be of any - size. For example, it is useful to set changeable batch size as "None" or -1. - dtype (np.dtype|VarType|str, optional): The type of the data. Supported - dtype: bool, float16, float32, float64, int8, int16, int32, int64, - uint8. Default: float32. - lod_level (int, optional): The LoD level of the LoDTensor. Usually users - don't have to set this value. For more details about when and how to - use LoD level, see :ref:`user_guide_lod_tensor` . Default: 0. - - Returns: - Variable: The global variable that gives access to the data. - - Examples: - .. code-block:: python - - import paddle - import paddle.fluid as fluid - import numpy as np - paddle.enable_static() - - # Creates a variable with fixed size [3, 2, 1] - # User can only feed data of the same shape to x - x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32') - - # Creates a variable with changeable batch size -1. 
- # Users can feed data of any batch size into y, - # but size of each data sample has to be [2, 1] - y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32') - - z = x + y - - # In this example, we will feed x and y with np-ndarray "1" - # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle - feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32) - - exe = fluid.Executor(fluid.CPUPlace()) - out = exe.run(fluid.default_main_program(), - feed={ - 'x': feed_data, - 'y': feed_data - }, - fetch_list=[z.name]) - - # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2 - print(out) - - """ - helper = LayerHelper('data', **locals()) - - check_type(name, 'name', (bytes, str), 'data') - check_type(shape, 'shape', (list, tuple), 'data') - - shape = list(shape) - for i in range(len(shape)): - if shape[i] is None: - shape[i] = -1 - - return helper.create_global_variable( - name=name, - shape=shape, - dtype=dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - stop_gradient=True, - lod_level=lod_level, - is_data=True, - need_check_feed=True, - ) diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index 7fee96e6477..e74e336ab59 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -347,8 +347,8 @@ class DataFeeder: startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data_1 = fluid.data(name='data_1', shape=[None, 2, 2], dtype='float32') - data_2 = fluid.data(name='data_2', shape=[None, 1, 3], dtype='float32') + data_1 = paddle.static.data(name='data_1', shape=[None, 2, 2], dtype='float32') + data_2 = paddle.static.data(name='data_2', shape=[None, 1, 3], dtype='float32') out = paddle.static.nn.fc(x=[data_1, data_2], size=2) # ... feeder = fluid.DataFeeder([data_1, data_2], place) @@ -414,9 +414,9 @@ class DataFeeder: for i in range(1, limit + 1): yield np.ones([6]).astype('float32') * i , np.ones([1]).astype('int64') * i, np.random.random([9]).astype('float32') - data_1 = fluid.data(name='data_1', shape=[None, 2, 1, 3]) - data_2 = fluid.data(name='data_2', shape=[None, 1], dtype='int64') - data_3 = fluid.data(name='data_3', shape=[None, 3, 3], dtype='float32') + data_1 = paddle.static.data(name='data_1', shape=[None, 2, 1, 3]) + data_2 = paddle.static.data(name='data_2', shape=[None, 1], dtype='int64') + data_3 = paddle.static.data(name='data_3', shape=[None, 3, 3], dtype='float32') feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace()) @@ -482,8 +482,8 @@ class DataFeeder: yield np.ones([4]) * factor + base, np.ones([4]) * factor + base + 5 return _reader() - x = fluid.data(name='x', shape=[None, 2, 2]) - y = fluid.data(name='y', shape=[None, 2, 2], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 2, 2]) + y = paddle.static.data(name='y', shape=[None, 2, 2], dtype='float32') z = paddle.add(x, y) @@ -582,8 +582,8 @@ class DataFeeder: places = [fluid.CPUPlace() for _ in range(place_num)] # a simple network sample - data = fluid.data(name='data', shape=[None, 4, 4], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + data = paddle.static.data(name='data', shape=[None, 4, 4], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') hidden = paddle.static.nn.fc(x=data, size=10) feeder = fluid.DataFeeder(place=places[0], feed_list=[data, label]) diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 2c5713e3761..3b018f30c64 100755 --- 
a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -1687,7 +1687,7 @@ class Executor: compiled = isinstance(program, compiler.CompiledProgram) - # Check if fluid.data() variable no feed data + # Check if any paddle.static.data() variable received no feed data if use_prune: if compiled: global_block = program._program.global_block() diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index a838f06dbe8..43c23eb003e 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -2072,9 +2072,9 @@ class Variable(metaclass=VariableMetaClass): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle - x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32') + x = paddle.static.data(name="x", shape=[-1, 23, 48], dtype='float32') print(x.grad_name) # output is ``x@GRAD`` """ diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index f61b4bbe233..9a3e3c9b835 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -190,8 +190,8 @@ def save_inference_model( path = "./infer_model" # User defined network, here a softmax regression example - image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace()) predict = paddle.static.nn.fc(x=image, size=10, activation='softmax') diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 0697b53914f..b1f9a03d0d9 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -335,7 +335,7 @@ class StaticRNN: vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence x_emb = layers.embedding( input=x, @@ -426,7 +426,7 @@ class StaticRNN: vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence x_emb = layers.embedding( input=x, @@ -455,7 +455,7 @@ class StaticRNN: import paddle.fluid.layers as layers vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence x_emb = layers.embedding( input=x, @@ -558,7 +558,7 @@ class StaticRNN: vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence x_emb = layers.embedding( input=x, @@ -611,7 +611,7 @@ class StaticRNN: vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence x_emb = layers.embedding( input=x, @@ -673,7 +673,7 @@ class StaticRNN: vocab_size, hidden_size = 10000, 200 paddle.enable_static() - x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64') + x = paddle.static.data(name="x", shape=[None, 1, 1], dtype='int64') # create word sequence
x_emb = layers.embedding( input=x, @@ -955,7 +955,7 @@ class While: i = paddle.full(shape=[1], dtype='int64', fill_value=0) loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10) one = paddle.full(shape=[1], dtype='float32', fill_value=1) - data = fluid.data(name='data', shape=[1], dtype='float32') + data = paddle.static.data(name='data', shape=[1], dtype='float32') sums = paddle.full(shape=[1], dtype='float32', fill_value=0) # Define the variable to be obtained outside of While, whose name should be different from the variable obtained inside the While cond = paddle.less_than(x=i, y=loop_len) diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py index 136670bf27c..01426a0c792 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/fluid/layers/math_op_patch.py @@ -183,13 +183,13 @@ def monkey_patch_variable(): In Static Graph Mode: .. code-block:: python - + import paddle import paddle.fluid as fluid - + paddle.enable_static() startup_prog = fluid.Program() main_prog = fluid.Program() with fluid.program_guard(startup_prog, main_prog): - original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32') + original_variable = paddle.static.data(name = "new_variable", shape=[2,2], dtype='float32') new_variable = original_variable.astype('int64') print("new var's dtype is: {}".format(new_variable.dtype)) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 441c737c064..a2d962d11d1 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -206,7 +206,7 @@ def embedding( import paddle paddle.enable_static() - data = fluid.data(name='x', shape=[None, 1], dtype='int64') + data = paddle.static.data(name='x', shape=[None, 1], dtype='int64') # example 1 emb_1 = paddle.static.nn.embedding(input=data, size=[128, 64]) @@ -572,7 +572,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. - x = fluid.data(name='x', shape=[2, 4], dtype='float32') + x = paddle.static.data(name='x', shape=[2, 4], dtype='float32') fluid.layers.nn.reduce_sum(x) # [3.5] fluid.layers.nn.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] fluid.layers.nn.reduce_sum(x, dim=-1) # [1.9, 1.6] @@ -582,7 +582,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): # [[[1, 2], [3, 4]], # [[5, 6], [7, 8]]] # Each example is followed by the corresponding output tensor.
- y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[2, 2, 2], dtype='float32') fluid.layers.nn.reduce_sum(y, dim=[1, 2]) # [10, 26] fluid.layers.nn.reduce_sum(y, dim=[0, 1]) # [16, 20] diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index caacc658a31..324f6227983 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -111,7 +111,7 @@ def simple_img_conv_pool( import paddle.fluid as fluid import paddle paddle.enable_static() - img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32') + img = paddle.static.data(name='img', shape=[100, 1, 28, 28], dtype='float32') conv_pool = fluid.nets.simple_img_conv_pool(input=img, filter_size=5, num_filters=20, @@ -214,7 +214,7 @@ def img_conv_group( import paddle paddle.enable_static() - img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32') + img = paddle.static.data(name='img', shape=[None, 1, 28, 28], dtype='float32') conv_pool = fluid.nets.img_conv_group(input=img, conv_padding=1, conv_num_filter=[3, 3], @@ -331,7 +331,7 @@ def sequence_conv_pool( input_dim = 100 #len(word_dict) emb_dim = 128 hid_dim = 512 - data = fluid.data(name="words", shape=[None, 1], dtype="int64", lod_level=1) + data = paddle.static.data(name="words", shape=[None, 1], dtype="int64", lod_level=1) emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True) seq_conv = fluid.nets.sequence_conv_pool(input=emb, num_filters=hid_dim, @@ -391,7 +391,7 @@ def glu(input, dim=-1): import paddle paddle.enable_static() - data = fluid.data( + data = paddle.static.data( name="words", shape=[-1, 6, 3, 9], dtype="float32") # shape of output: [-1, 3, 3, 9] output = fluid.nets.glu(input=data, dim=1) @@ -472,9 +472,9 @@ def scaled_dot_product_attention( import paddle paddle.enable_static() - queries = fluid.data(name="queries", shape=[3, 5, 9], dtype="float32") - keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") - values = fluid.data(name="values", shape=[3, 6, 10], dtype="float32") + queries = paddle.static.data(name="queries", shape=[3, 5, 9], dtype="float32") + keys = paddle.static.data(name="keys", shape=[3, 6, 9], dtype="float32") + values = paddle.static.data(name="values", shape=[3, 6, 10], dtype="float32") contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) contexts.shape # [3, 5, 10] """ diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 3e5b4babc69..d835a3fbfcf 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -2036,7 +2036,7 @@ class AdagradOptimizer(Optimizer): paddle.enable_static() np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) - inp = fluid.data(name="inp", shape=[2, 2]) + inp = paddle.static.data(name="inp", shape=[2, 2], dtype="float32") out = paddle.static.nn.fc(inp, size=3) out = paddle.sum(out) optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2) @@ -2228,8 +2228,8 @@ class AdamOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ 
-2257,8 +2257,8 @@ class AdamOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -2292,8 +2292,8 @@ class AdamOptimizer(Optimizer): div_res = global_step / decay_steps decayed_beta1 = beta1_init * (decay_rate**div_res) decayed_beta2 = beta2_init * (decay_rate**div_res) - fluid.layers.assign(decayed_beta1, beta1) - fluid.layers.assign(decayed_beta2, beta2) + paddle.assign(decayed_beta1, beta1) + paddle.assign(decayed_beta2, beta2) return beta1, beta2, epsilon @@ -2651,7 +2651,7 @@ class AdamaxOptimizer(Optimizer): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - data = fluid.data(name='X', shape=[None, 1], dtype='float32') + data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) adam = fluid.optimizer.AdamaxOptimizer(learning_rate=0.2) @@ -2994,7 +2994,7 @@ class DecayedAdagradOptimizer(Optimizer): import paddle.fluid as fluid paddle.enable_static() - x = fluid.data(name='x', shape=[None, 10], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 10], dtype='float32') trans = paddle.static.nn.fc(x, 100) cost = paddle.mean(trans) optimizer = fluid.optimizer.DecayedAdagradOptimizer(learning_rate=0.2) @@ -3118,7 +3118,7 @@ class AdadeltaOptimizer(Optimizer): import paddle.fluid as fluid paddle.enable_static() - image = fluid.data(name='image', shape=[None, 28], dtype='float32') + image = paddle.static.data(name='image', shape=[None, 28], dtype='float32') fc = paddle.static.nn.fc(image, size=10) cost = paddle.mean(fc) optimizer = fluid.optimizer.Adadelta( @@ -3747,7 +3747,7 @@ class LambOptimizer(AdamOptimizer): import paddle.fluid as fluid paddle.enable_static() - data = fluid.data(name='x', shape=[-1, 5], dtype='float32') + data = paddle.static.data(name='x', shape=[-1, 5], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) cost = paddle.mean(hidden) @@ -3964,7 +3964,7 @@ class ModelAverage(Optimizer): startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): # build net - data = fluid.data(name='X', shape=[None, 1], dtype='float32') + data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) @@ -4143,7 +4143,7 @@ class ModelAverage(Optimizer): startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): # build net - data = fluid.data(name='X', shape=[None, 1], dtype='float32') + data = paddle.static.data(name='X', shape=[None, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) @@ -4199,7 +4199,7 @@ class ModelAverage(Optimizer): startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): # build net - data = fluid.data(name='X', shape=[None, 1], dtype='float32') + data = 
paddle.static.data(name='X', shape=[None, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py index 9b29b01fd60..750ea5d8e13 100644 --- a/python/paddle/fluid/profiler.py +++ b/python/paddle/fluid/profiler.py @@ -84,10 +84,11 @@ def npu_profiler(output_file, config=None): import paddle.fluid as fluid import paddle.fluid.profiler as profiler import numpy as np + import paddle epoc = 8 dshape = [4, 3, 28, 28] - data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32') + data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32') conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) place = fluid.NPUPlace(0) @@ -337,7 +338,7 @@ def profiler( epoc = 8 dshape = [4, 3, 28, 28] - data = fluid.data(name='data', shape=[None, 3, 28, 28], dtype='float32') + data = paddle.static.data(name='data', shape=[None, 3, 28, 28], dtype='float32') conv = paddle.static.nn.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index b9ed17304c8..d36542da09b 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -655,7 +655,7 @@ class DataLoader: Args: feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list. - The Tensors should be created by :code:`fluid.data()`. + The Tensors should be created by :code:`paddle.static.data()`. capacity (int): capacity of the queue maintained in DataLoader. The unit is batch number. Set larger capacity if your reader is fast. @@ -1651,8 +1651,8 @@ class PyReader(DataLoaderBase): yield fake_image, fake_label return reader - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, @@ -1708,8 +1708,8 @@ class PyReader(DataLoaderBase): yield fake_image, fake_label return reader - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) user_defined_reader = reader_creator_random_image(784, 784) @@ -1800,7 +1800,7 @@ class PyReader(DataLoaderBase): for i in range(5): yield np.random.uniform(low=0, high=255, size=[784, 784]), - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) @@ -1837,7 +1837,7 @@ class PyReader(DataLoaderBase): for i in range(5): yield np.random.uniform(low=0, high=255, size=[784, 784]), - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') reader = fluid.io.PyReader(feed_list=[image], capacity=4, 
iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) @@ -1908,8 +1908,8 @@ class PyReader(DataLoaderBase): yield fake_image, fake_label return generator - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) @@ -1975,8 +1975,8 @@ class PyReader(DataLoaderBase): yield fake_image, fake_label return generator - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) @@ -2043,8 +2043,8 @@ class PyReader(DataLoaderBase): yield batch_image, batch_label return generator - image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 39b17cd0634..9bc9b39dbe9 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -85,17 +85,19 @@ class TestGenerateProposals(LayerTest): variances_np = np.ones((4, 4, 3, 4)).astype('float32') with self.static_graph(): - scores = fluid.data( + scores = paddle.static.data( name='scores', shape=[2, 3, 4, 4], dtype='float32' ) - bbox_deltas = fluid.data( + bbox_deltas = paddle.static.data( name='bbox_deltas', shape=[2, 12, 4, 4], dtype='float32' ) - im_info = fluid.data(name='im_info', shape=[2, 3], dtype='float32') - anchors = fluid.data( + im_info = paddle.static.data( + name='im_info', shape=[2, 3], dtype='float32' + ) + anchors = paddle.static.data( name='anchors', shape=[4, 4, 3, 4], dtype='float32' ) - variances = fluid.data( + variances = paddle.static.data( name='var', shape=[4, 4, 3, 4], dtype='float32' ) rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals( @@ -175,8 +177,12 @@ class TestDistributeFpnProposals(LayerTest): rois_np = np.random.rand(10, 4).astype('float32') rois_num_np = np.array([4, 6]).astype('int32') with self.static_graph(): - rois = fluid.data(name='rois', shape=[10, 4], dtype='float32') - rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32') + rois = paddle.static.data( + name='rois', shape=[10, 4], dtype='float32' + ) + rois_num = paddle.static.data( + name='rois_num', shape=[None], dtype='int32' + ) ( multi_rois, restore_ind, @@ -230,7 +236,7 @@ class TestDistributeFpnProposals(LayerTest): def test_distribute_fpn_proposals_error(self): program = Program() with program_guard(program): - fpn_rois = fluid.data( + fpn_rois = paddle.static.data( 
name='data_error', shape=[10, 4], dtype='int32', lod_level=1 ) self.assertRaises( diff --git a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py index 50fb039974c..bac539a437f 100644 --- a/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py +++ b/python/paddle/fluid/tests/unittests/asp/asp_pruning_base.py @@ -31,10 +31,12 @@ class TestASPHelperPruningBase(unittest.TestCase): self.startup_program = fluid.Program() def build_model(): - img = fluid.data( + img = paddle.static.data( name='img', shape=[None, 3, 32, 32], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) hidden = paddle.static.nn.conv2d( input=img, num_filters=4, filter_size=3, padding=2, act="relu" ) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py index 2379e9e7029..38fce44d1ec 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_customized_pruning.py @@ -196,10 +196,12 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase): self.customer_prefix = "customer_layer" def build_model(): - img = fluid.data( + img = paddle.static.data( name='img', shape=[None, 3, 32, 32], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) hidden = paddle.static.nn.conv2d( input=img, num_filters=4, filter_size=3, padding=2, act="relu" ) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py index 67b14cc549c..4eca7d6adee 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py @@ -31,10 +31,12 @@ class TestASPStaticOptimize(unittest.TestCase): self.startup_program = fluid.Program() def build_model(): - img = fluid.data( + img = paddle.static.data( name='img', shape=[None, 3, 24, 24], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) hidden = paddle.static.nn.conv2d( input=img, num_filters=4, filter_size=3, padding=2, act="relu" ) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py index cf011874ea8..669dc9b0939 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_pruning_static.py @@ -31,10 +31,12 @@ class TestASPStaticPruningBase(unittest.TestCase): self.startup_program = fluid.Program() def build_model(): - img = fluid.data( + img = paddle.static.data( name='img', shape=[None, 3, 24, 24], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) hidden = paddle.static.nn.conv2d( input=img, num_filters=2, filter_size=3, padding=2, act="relu" ) diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py index b4876bdce53..5cd3f4b8e3e 100644 --- 
a/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_save_load.py @@ -128,10 +128,12 @@ class TestASPStaticOptimize(unittest.TestCase): self.startup_program = fluid.Program() def build_model(): - img = fluid.data( + img = paddle.static.data( name='img', shape=[None, 3, 32, 32], dtype='float32' ) - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + label = paddle.static.data( + name='label', shape=[None, 1], dtype='int64' + ) hidden = paddle.static.nn.conv2d( input=img, num_filters=4, filter_size=3, padding=2, act="relu" ) diff --git a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py index d42c9f0ffe5..29ec0c67cde 100644 --- a/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py +++ b/python/paddle/fluid/tests/unittests/auto_checkpoint_utils.py @@ -65,8 +65,12 @@ class AutoCheckpointBase(unittest.TestCase): self, exe, main_prog, startup_prog, minimize=True, iterable=True ): def simple_net(): - image = fluid.data(name='image', shape=[-1, 4, 4], dtype='float32') - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') + image = paddle.static.data( + name='image', shape=[-1, 4, 4], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) fc_tmp = paddle.static.nn.fc(image, size=CLASS_NUM) cross_entropy = paddle.nn.functional.softmax_with_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py index 035a174775b..81c05805aca 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_col.py @@ -71,7 +71,7 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py index a480993e8ec..162ef1c7b61 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_by_row.py @@ -75,7 +75,7 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, IN_SIZE], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py index 689b068f025..209af435c13 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/static_model_parallel_embedding.py @@ -65,7 +65,7 @@ def create_model(data, rank): class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', 
shape=[batch_size, IN_SIZE], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py index 089a06c9ef1..1e80af9f104 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_checkpoint.py @@ -35,8 +35,10 @@ class FleetTest(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') + image = paddle.static.data( + name='img', shape=[None, 28, 28], dtype='float32' + ) + label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') feeder = fluid.DataFeeder( feed_list=[image, label], place=fluid.CPUPlace() ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py index 91e5b225767..44632187268 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_categorical.py @@ -103,13 +103,13 @@ class CategoricalTest(unittest.TestCase): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.logits_static = fluid.data( + self.logits_static = paddle.static.data( name='logits', shape=self.logits_shape, dtype='float32' ) - self.other_logits_static = fluid.data( + self.other_logits_static = paddle.static.data( name='other_logits', shape=self.logits_shape, dtype='float32' ) - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) @@ -211,13 +211,13 @@ class CategoricalTest2(CategoricalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.logits_static = fluid.data( + self.logits_static = paddle.static.data( name='logits', shape=self.logits_shape, dtype='float64' ) - self.other_logits_static = fluid.data( + self.other_logits_static = paddle.static.data( name='other_logits', shape=self.logits_shape, dtype='float64' ) - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) @@ -234,7 +234,7 @@ class CategoricalTest3(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) @@ -263,7 +263,7 @@ class CategoricalTest4(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) @@ -344,7 +344,7 @@ class CategoricalTest8(CategoricalTest): with fluid.program_guard(self.test_program): self.logits_static = self.logits_np.tolist() self.other_logits_static = self.other_logits_np.tolist() - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) @@ -361,7 +361,7 @@ class CategoricalTest9(CategoricalTest): with fluid.program_guard(self.test_program): 
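# NOTE: every hunk in this distribution test follows the same one-for-one swap:
# the placeholder constructor changes from fluid.data to paddle.static.data,
# with name/shape/dtype passed through untouched, e.g. (sketch mirroring the
# hunks above):
#     self.value_static = paddle.static.data(
#         name='value', shape=self.value_shape, dtype='int64'
#     )
# Inputs held as plain numpy arrays, lists, or tuples (CategoricalTest3/4/8/9)
# are deliberately left alone; only graph placeholders need migrating.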
self.logits_static = tuple(self.logits_np.tolist()) self.other_logits_static = tuple(self.other_logits_np.tolist()) - self.value_static = fluid.data( + self.value_static = paddle.static.data( name='value', shape=self.value_shape, dtype='int64' ) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py index 23eb6964c3e..8799126c629 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_static_analysis.py @@ -108,7 +108,7 @@ def func_to_test5(): a = inner_int_func() b = inner_bool_float_func(3) c = inner_unknown_func(None) - d = paddle.fluid.data('x', [1, 2]) + d = paddle.static.data('x', [1, 2]) result_var_type5 = { diff --git a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py index 14a8d69a8e5..6779cfe81e1 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_fill_any_like_op_ipu.py @@ -69,7 +69,7 @@ class TestCase1(TestBase): class TestError(TestBase): @IPUOpTest.static_graph def build_model(self): - x = paddle.fluid.data('x', [-1, 3, 13], 'float32') + x = paddle.static.data('x', [-1, 3, 13], 'float32') x_fill = paddle.full_like(x, **self.attrs) out = paddle.add(x_fill, x_fill) self.fetch_list = [out.name] diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py index 342b897d003..331c5f49385 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_cpu_bfloat16_pass.py @@ -26,7 +26,7 @@ class TestMKLDNNCpuBfloat16Pass(InferencePassTest): def setUp(self): self.init_data() with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py index 24a63751cfe..6fad65569f7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_elt_act_fuse_pass.py @@ -31,10 +31,10 @@ class ElementwiseActivationMkldnnFusePassTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data_A = fluid.data( + data_A = paddle.static.data( name="data_A", shape=[-1, 3, 100, 100], dtype="float32" ) - data_B = fluid.data( + data_B = paddle.static.data( name="data_B", shape=[-1, 3, 100, 100], dtype="float32" ) elt_out = self.operand(data_A, data_B) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py index e344c873ee2..302adcae3ba 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py @@ -32,10 +32,10 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest): def make_network(self): with 
fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) - y = fluid.data( + y = paddle.static.data( name='y', shape=[-1] + self.shape_y, dtype=self.d_type ) out = paddle.matmul(x, y) @@ -74,10 +74,10 @@ class TestMKLDNNMatmulOtherDimsFuseOp(TestMKLDNNMatmulFuseOp): class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp): def make_network(self): with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) - y = fluid.data( + y = paddle.static.data( name='y', shape=[-1] + self.shape_y, dtype=self.d_type ) out = paddle.matmul(x, y) @@ -97,10 +97,10 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp): def make_network(self): with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) - y = fluid.data( + y = paddle.static.data( name='y', shape=[-1] + self.shape_y, dtype=self.d_type ) out = paddle.matmul(x, y) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py index ad2f5777f20..0fc9cabcf7e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py @@ -29,7 +29,7 @@ class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest): self.pass_name = 'reshape_transpose_matmul_mkldnn_fuse_pass' with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=self.data_shape, dtype="float32" ) weight = paddle.create_parameter( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py index 0c205fbee7c..11a81dd6899 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py @@ -37,7 +37,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest): def setUp(self): self.setUpTensorRTParam() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 32, 32], dtype="float32" ) act_out = self.append_act(data) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py index 592dede838e..63de86f9b30 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_op.py @@ -28,7 +28,7 @@ class TensorRTSubgraphPassConv3dTest(InferencePassTest): self.init_params() self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 6, 32, 32], dtype="float32" ) conv_out = paddle.static.nn.conv3d( @@ -112,7 +112,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, 
self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" ) conv_out = paddle.static.nn.conv3d( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py index b45b8dc17dc..6f350df963f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv3d_transpose_op.py @@ -27,7 +27,7 @@ class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 4, 4, 32, 32], dtype="float32" ) conv_out = paddle.static.nn.conv3d_transpose( @@ -94,7 +94,7 @@ class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" ) conv_out = paddle.static.nn.conv3d_transpose( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py index 9b6ab8287f6..45588525cf4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py @@ -30,7 +30,7 @@ class TensorRTSubgraphPassConvTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) conv_out = paddle.static.nn.conv2d( @@ -108,7 +108,7 @@ class TensorRTSubgraphPassConvTransposeTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) conv_out = paddle.static.nn.conv2d_transpose( @@ -207,7 +207,7 @@ class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, -1, -1], dtype="float32" ) conv_out = paddle.static.nn.conv2d( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py index b34e128ddf5..b3b99aa438d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_quant_dequant_pass.py @@ -29,11 +29,13 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14]) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) label_shape = paddle.reshape(self.label, shape=[1, 1, 1]) conv_out = paddle.static.nn.conv2d( input=data_reshape, @@ -144,11 
+146,13 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14]) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) label_shape = paddle.reshape(self.label, shape=[1, 1, 1]) conv_out = paddle.static.nn.conv2d( input=data_reshape, @@ -243,11 +247,13 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) data_reshape = paddle.reshape(self.data, shape=[1, 4, 14, 14]) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) label_shape = paddle.reshape(self.label, shape=[1, 1, 1]) conv_out = paddle.static.nn.conv2d_transpose( input=data_reshape, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py index 253a2b5eb8a..3178226491b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_deformable_conv.py @@ -30,13 +30,13 @@ class TRTDeformableConvTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - input = fluid.data( + input = paddle.static.data( name='input', shape=self.input_size, dtype=self.dtype ) - offset = fluid.data( + offset = paddle.static.data( name='offset', shape=self.offset_size, dtype=self.dtype ) - mask = fluid.data( + mask = paddle.static.data( name='mask', shape=self.mask_size, dtype=self.dtype ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py index 98ca955ee94..acd60260948 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_dynamic_shape.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig class TRTDynamicShapeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 16, 16], dtype="float32" ) out = paddle.static.nn.conv2d( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py index 95055040606..87bfb350337 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_elementwise_op.py @@ -29,10 +29,10 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 3, 64, 1], dtype="float32" ) eltwise_out = 
self.append_eltwise(data1, data2) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py index 252ea329edb..676fbdda099 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_pass.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig class FCFusePassTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 2, 2], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -56,7 +56,7 @@ class FCFusePassTRTTest(InferencePassTest): class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 8], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -84,7 +84,7 @@ class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[3, 24, 16, 16], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -112,7 +112,9 @@ class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=1, activation="relu" ) @@ -144,7 +146,9 @@ class FCFusePassTRTDynamicDims2Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128, 32], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=1, activation="relu" ) @@ -176,7 +180,9 @@ class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[32, 128, 32], dtype="float32") + data = paddle.static.data( + name="data", shape=[32, 128, 32], dtype="float32" + ) fc_out1 = paddle.static.nn.fc( x=data, size=64, num_flatten_dims=2, activation="relu" ) @@ -208,7 +214,7 @@ class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 12, 4, 6], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -244,7 +250,7 @@ class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) fc_out1 = paddle.static.nn.fc( @@ -280,7 +286,7 @@ 
class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) fc_out1 = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py index 5179d0330d6..313bc5e2b8b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py @@ -27,10 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) fc_out = paddle.static.nn.fc( x=self.data, size=10, @@ -98,10 +100,12 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) fc_out = paddle.static.nn.fc( x=self.data, size=28, @@ -170,10 +174,12 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest): class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest): def setUp(self): def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) label_shape = paddle.reshape(self.label, shape=[1, 1, 1]) reshape_out = paddle.reshape(self.data, shape=[1, 14, 14, 4]) fc_out = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py index eec26fefec2..28ac16d8259 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTFlattenTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) flatten_out = self.append_flatten(data) @@ -56,7 +56,7 @@ class TRTFlattenTest(InferencePassTest): class TRTFlattenDynamicTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) flatten_out = self.append_flatten(data) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py 
index 161a3142d52..d6706b6f061 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_nd_op.py @@ -27,8 +27,12 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTGatherNdTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[-1, 3, 4], dtype="float32") - index = fluid.data(name="index", shape=[-1, 2, 2], dtype="int32") + data = paddle.static.data( + name="data", shape=[-1, 3, 4], dtype="float32" + ) + index = paddle.static.data( + name="index", shape=[-1, 2, 2], dtype="int32" + ) gather_nd = paddle.gather_nd(data, index) out = nn.batch_norm(gather_nd, is_test=True) @@ -62,10 +66,12 @@ class TRTGatherNdTest(InferencePassTest): class TRTGatherNdFp16Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 1280, 192], dtype="float32" ) - index = fluid.data(name="index", shape=[-1, 1028, 2], dtype="int32") + index = paddle.static.data( + name="index", shape=[-1, 1028, 2], dtype="int32" + ) gather_nd = paddle.gather_nd(data, index) out = nn.batch_norm(gather_nd, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py index 3b73ae07441..90b3baab683 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_gather_op.py @@ -27,8 +27,12 @@ class TRTGatherTest1(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', shape=[-1, 128], dtype='float32') - index = fluid.data(name='index', shape=[-1, 1], dtype='int32') + data = paddle.static.data( + name='data', shape=[-1, 128], dtype='float32' + ) + index = paddle.static.data( + name='index', shape=[-1, 1], dtype='int32' + ) scale_out = paddle.gather(data, index=index) out = paddle.nn.functional.softmax(scale_out) @@ -66,8 +70,10 @@ class TRTGatherTest2(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name='data', shape=[16, 64], dtype='float32') - index = fluid.data(name='index', shape=[2], dtype='int32') + data = paddle.static.data( + name='data', shape=[16, 64], dtype='float32' + ) + index = paddle.static.data(name='index', shape=[2], dtype='int32') scale_out = paddle.gather(data, index=index) out = paddle.nn.functional.softmax(scale_out) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py index 379c3872242..923fa74701a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_inspector.py @@ -29,7 +29,9 @@ class TensorRTInspectorTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 16, 16], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 16, 16], dtype="float32" + ) matmul_out = paddle.matmul( x=data, y=data, diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py index 4d98c8cb3f3..f9eeb2f935d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_instance_norm_op.py @@ -20,6 +20,7 @@ import unittest import numpy as np from inference_pass_test import InferencePassTest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.static.nn as nn @@ -43,7 +44,7 @@ class TRTInstanceNormTest(InferencePassTest): with fluid.program_guard(self.main_program, self.startup_program): shape = [-1, self.channel, self.height, self.width] - data = fluid.data(name='in', shape=shape, dtype='float32') + data = paddle.static.data(name='in', shape=shape, dtype='float32') instance_norm_out = nn.instance_norm(data) out = nn.batch_norm(instance_norm_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py index 0d10acae95c..eb27f2e0afe 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul.py @@ -28,7 +28,9 @@ class TensorRTMatMulDims2Test(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[24, 24], dtype="float32") + data = paddle.static.data( + name="data", shape=[24, 24], dtype="float32" + ) matmul_out = paddle.matmul( x=data, y=data, @@ -65,7 +67,7 @@ class TensorRTMatMulTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 24, 24], dtype="float32" ) matmul_out = paddle.matmul( @@ -126,10 +128,12 @@ class TensorRTMatMulBroadcastTest(InferencePassTest): self.set_params() place = fluid.CPUPlace() with fluid.program_guard(self.main_program, self.startup_program): - data_x = fluid.data( + data_x = paddle.static.data( name="data_x", shape=[-1, 6, 24], dtype="float32" ) - data_y = fluid.data(name="data_y", shape=[24, 16], dtype="float32") + data_y = paddle.static.data( + name="data_y", shape=[24, 16], dtype="float32" + ) matmul_out = paddle.matmul( x=data_x, y=data_y, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py index 413002d9885..2cdb13cd278 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py @@ -29,10 +29,12 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) matmul_out = paddle.matmul( x=self.data, y=self.data, @@ -129,10 +131,12 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[1, 28, 28], dtype='float32' ) - self.label = 
fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) reshape_out = paddle.reshape(self.data, shape=[1, 4, 14, 14]) matmul_out = paddle.matmul( x=reshape_out, @@ -231,10 +235,12 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest): self.set_params() def network(): - self.data = fluid.data( + self.data = paddle.static.data( name='data', shape=[-1, 28, 28], dtype='float32' ) - self.label = fluid.data(name='label', shape=[1, 1], dtype='int64') + self.label = paddle.static.data( + name='label', shape=[1, 1], dtype='int64' + ) matmul_out = paddle.matmul( x=self.data, y=self.data, diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py index ac4e399f011..5f8257d6696 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_multiclass_nms3_op.py @@ -218,10 +218,10 @@ class TensorRTMultiClassNMS3Test(InferencePassTest): def build(self): with fluid.program_guard(self.main_program, self.startup_program): - boxes = fluid.data( + boxes = paddle.static.data( name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32' ) - scores = fluid.data( + scores = paddle.static.data( name='scores', shape=[-1, self.num_classes, self.num_boxes], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py index f335bd8f823..d026a563324 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_op.py @@ -43,7 +43,7 @@ class TRTNearestInterpTest(InferencePassTest): self.origin_shape[1], self.channels, ] - data = fluid.data(name='data', shape=shape, dtype='float32') + data = paddle.static.data(name='data', shape=shape, dtype='float32') resize_out = self.append_nearest_interp(data) out = nn.batch_norm(resize_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py index 056e5b6e292..6bccf5572a5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_nearest_interp_v2_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from inference_pass_test import InferencePassTest +import paddle import paddle.fluid.core as core import paddle.nn.functional as F import paddle.static.nn as nn @@ -43,7 +44,7 @@ class TRTNearestInterpTest(InferencePassTest): self.origin_shape[1], self.channels, ] - data = fluid.data(name='data', shape=shape, dtype='float32') + data = paddle.static.data(name='data', shape=shape, dtype='float32') resize_out = self.append_nearest_interp(data) out = nn.batch_norm(resize_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py index 4b7dc7c9cb6..8070f072aee 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pad_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig class 
PadOpTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[1, 3, 128, 128], dtype="float32" ) pad_out = paddle.nn.functional.pad( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py index f8abf50dd10..83a80479ed4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool3d_op.py @@ -58,7 +58,7 @@ class TensorRTPool3dTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', @@ -190,7 +190,7 @@ class TensorRTAdaptiveAvgPool3DTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', @@ -290,7 +290,7 @@ class TensorRTAdaptiveMaxPool3DTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py index e91f183b146..5f9f4c21009 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py @@ -59,7 +59,7 @@ class TensorRTPoolTest(InferencePassTest): ) with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=[-1, self.channel, self.height, self.width], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py index cd66cb1e914..6872542ffd4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_sum_op.py @@ -27,7 +27,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTReduceSumTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) reduce_sum = paddle.sum(data, axis=[2, -1], keepdim=True) @@ -60,7 +60,7 @@ class TRTReduceSumTest(InferencePassTest): class TRTReduceSumAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) reduce_sum = paddle.sum(data, keepdim=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py index 8edd7cafcbe..c0b31088bfc 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_reshape_op.py @@ -36,7 +36,7 @@ class TRTReshapeTest(InferencePassTest): 
self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = self.append_reshape(data, self.reshape) @@ -74,7 +74,7 @@ class TRTReshapeTest1(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = self.append_reshape(data, self.reshape) @@ -101,7 +101,7 @@ class TRTReshapeTest2(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) reshape_out = paddle.reshape(x=data, shape=self.reshape) @@ -128,7 +128,7 @@ class TRTReshapeTest3(TRTReshapeTest): self.input_shape[2], ] with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) bn_out = nn.batch_norm(data, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py index 3bca0dbf184..4ec5295261e 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_scale_op.py @@ -27,7 +27,9 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTScaleTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[-1, 512], dtype="float32") + data = paddle.static.data( + name="data", shape=[-1, 512], dtype="float32" + ) scale_out = self.append_scale(data) out = nn.batch_norm(scale_out, is_test=True) @@ -57,7 +59,7 @@ class TRTScaleTest(InferencePassTest): class TRTScaleShape2Test(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 512, 512], dtype="float32" ) scale_out = self.append_scale(data) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py index fc3b066556d..ef21aecb34d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_shuffle_channel_detect_pass.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class ShuffleChannelFuseTRTPassTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) reshape1 = paddle.reshape(x=data, shape=[-1, 2, 3, 64, 64]) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py index d3c242c8d83..31f294b9cbb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_dynamic_plugin.py @@ -46,7 +46,9 @@ class SlicePluginTRTDynamicTest(InferencePassTest): 
self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="float32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py index aeea57e3888..6075add2c93 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_slice_plugin.py @@ -41,7 +41,9 @@ class SlicePluginTRTTest(InferencePassTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="float32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="float32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends @@ -110,7 +112,9 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="int32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends @@ -135,7 +139,9 @@ class StaticSlicePluginTRTTestInt32(SlicePluginTRTTest): self.setUpSliceParams() self.setUpTensorRTParams() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[3, 3, 3, 3], dtype="int32") + data = paddle.static.data( + name="data", shape=[3, 3, 3, 3], dtype="int32" + ) axes = self.params_axes starts = self.params_starts ends = self.params_ends diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py index 55347875152..e567a329fbc 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py @@ -28,7 +28,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TensorRTSubgraphPassFcTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) fc_out = paddle.static.nn.fc(x=[data], activation=None, size=1000) @@ -55,10 +55,10 @@ class TensorRTSubgraphPassFcTest(InferencePassTest): class TensorRTSubgraphPassConcatTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 3, 64, 64], dtype="float32" ) concat_out = paddle.concat([data1, data2], axis=2) @@ -85,7 +85,7 @@ class TensorRTSubgraphPassConcatTest(InferencePassTest): class TensorRTSubgraphPassSplitTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, 
num_or_sections=2) @@ -111,7 +111,7 @@ class TensorRTSubgraphPassSplitTest(InferencePassTest): class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, num_or_sections=2) @@ -139,7 +139,7 @@ class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) split_out = paddle.split(data, axis=-1, num_or_sections=2) @@ -175,7 +175,7 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) param_attr = fluid.ParamAttr( @@ -212,7 +212,7 @@ class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): class TensorRTSubgraphPassTransposeTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) transpose_out = self.append_transpose(data) @@ -242,7 +242,7 @@ class TensorRTSubgraphPassLayerNormTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) out = paddle.static.nn.layer_norm( @@ -273,7 +273,7 @@ class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): def setUp(self): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) out = paddle.static.nn.layer_norm( @@ -359,10 +359,10 @@ class TensorRTSubgraphPassLayerNormBeginNormAxis3Test( class TensorRTSubgraphPassElementwiseTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[-1, 3, 64, 64], dtype="float32" ) eltwise_out = self.append_eltwise(data1, data2) @@ -414,10 +414,12 @@ class TensorRTSubgraphPassElementwiseSerializeTest( class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) - data2 = fluid.data(name="data2", shape=[64, 64], dtype="float32") + data2 = paddle.static.data( + name="data2", shape=[64, 64], dtype="float32" + ) eltwise_out = self.append_eltwise(data1, data2) out = nn.batch_norm(eltwise_out, is_test=True) self.feeds = { diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py index 9557f8c71c9..ad163c1100e 100644 --- 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tile_op.py @@ -26,7 +26,7 @@ from paddle.fluid.core import AnalysisConfig, PassVersionChecker class TRTTileTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[4, 3, 224, 256], dtype="float32" ) tile_out = paddle.tile(x=data, repeat_times=[1, 1, 1, 1]) @@ -53,7 +53,9 @@ class TRTTileTest(InferencePassTest): class TRTTileExpandTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) @@ -78,7 +80,9 @@ class TRTTileExpandTest(InferencePassTest): class TRTTileExpandStaticTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) @@ -103,7 +107,9 @@ class TRTTileExpandStaticTest(InferencePassTest): class TRTTileExpandHalfTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data(name="data", shape=[1, 1, 1, 1], dtype="float32") + data = paddle.static.data( + name="data", shape=[1, 1, 1, 1], dtype="float32" + ) tile_out = paddle.tile(x=data, repeat_times=[1, 4, 1080, 1920]) out = paddle.static.nn.batch_norm(tile_out, is_test=True) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py index 5d9b3642999..ad342e6d479 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py @@ -26,10 +26,10 @@ from paddle.fluid.core import AnalysisConfig class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data1 = fluid.data( + data1 = paddle.static.data( name="data1", shape=[8, 32, 128], dtype="float32" ) - data2 = fluid.data( + data2 = paddle.static.data( name="data2", shape=[8, 32, 128], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py index cb587a8a806..c6c90186f26 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_tuned_dynamic_shape.py @@ -31,7 +31,7 @@ class TRTTunedDynamicShapeTest(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) conv_out = paddle.static.nn.conv2d( diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py index a578c5216f3..f51cfe685dc 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_yolo_box_op.py @@ -27,8 +27,10 @@ class TRTYoloBoxTest(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) @@ -79,8 +81,10 @@ class TRTYoloBoxFP16Test(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) @@ -129,8 +133,10 @@ class TRTYoloBoxIoUAwareTest(InferencePassTest): self.set_params() with fluid.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - image_size = fluid.data( + image = paddle.static.data( + name='image', shape=image_shape, dtype='float32' + ) + image_size = paddle.static.data( name='image_size', shape=[self.bs, 2], dtype='int32' ) boxes, scores = self.append_yolobox(image, image_size) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py index 403729786d4..e490f1e23c6 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py @@ -25,7 +25,7 @@ import paddle.fluid.core as core class FCFusePassTest(PassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - data = fluid.data( + data = paddle.static.data( name="data", shape=[32, 128], dtype="float32", lod_level=0 ) tmp_0 = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py index f0a121e5fcb..fbac04175f4 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py @@ -27,7 +27,7 @@ class FusionGroupPassTest(PassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) ) # subgraph with only 1 op node @@ -51,7 +51,9 @@ class FusionGroupPassTest(PassTest): def _prepare_feed_vars(self, shape, dtype, num_data, stop_gradient=True): feed_vars = [] for i in range(num_data): - var = fluid.data(name=("data" + str(i)), shape=shape, dtype=dtype) + var = 
paddle.static.data( + name=("data" + str(i)), shape=shape, dtype=dtype + ) var.stop_gradient = stop_gradient feed_vars.append(var) return feed_vars @@ -108,7 +110,7 @@ class FusionGroupPassInplaceTest(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 32], dtype=dtype) + paddle.static.data(name="data3", shape=[128, 32], dtype=dtype) ) # subgraph with 3 op node @@ -134,7 +136,7 @@ class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( - fluid.data(name="data2", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) ) # subgraph with 2 op nodes @@ -165,7 +167,7 @@ class FusionGroupPassSumTest(FusionGroupPassTest): with fluid.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( - fluid.data(name="data3", shape=[128, 128], dtype=dtype) + paddle.static.data(name="data3", shape=[128, 128], dtype=dtype) ) # subgraph with 2 op nodes diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py index c79266711c8..e0cb1e75950 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_skip_layernorm_pass.py @@ -25,10 +25,10 @@ class SkipLayerNormFusePassTest(PassTest): def setUp(self): paddle.enable_static() with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.data( + x = paddle.static.data( name="x", shape=[128, 768], dtype="float32", lod_level=0 ) - y = fluid.data( + y = paddle.static.data( name="y", shape=[128, 768], dtype="float32", lod_level=0 ) elementwise_out = paddle.add(x=x, y=y) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py index abd86efcf84..69143234a9d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py @@ -803,7 +803,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -820,7 +820,7 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): with program_guard(Program(), Program()): paddle.enable_static() x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - x = fluid.data(name='x', shape=x.shape, dtype=x.dtype) + x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) # Set this FLAG, the BatchNorm API will pass "reserve_space" argument into batch_norm op. 
os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' batch_norm = paddle.nn.BatchNorm(7, data_layout="NHWC") diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py index 590ebbf63ef..55a45b65517 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py @@ -157,7 +157,7 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -166,7 +166,7 @@ class TestBatchNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): bn = paddle.nn.BatchNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py index 58ccf798136..de589a22ed0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_loss_mlu.py @@ -30,14 +30,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) bce_loss = paddle.nn.loss.BCELoss( @@ -63,14 +63,14 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) res = paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py index 0aafe99276f..6e24c065107 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_bce_with_logits_loss_mlu.py @@ -41,10 +41,10 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) feed_dict = {"logit": logit_np, "label": label_np} @@ -52,12 +52,12 @@ def test_static( pos_weight = None weight = None if 
pos_weight_np is not None: - pos_weight = paddle.fluid.data( + pos_weight = paddle.static.data( name='pos_weight', shape=pos_weight_np.shape, dtype='float32' ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) feed_dict["weight"] = weight_np diff --git a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py index 57d004541af..65718123d6d 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_dropout_op_mlu.py @@ -224,7 +224,7 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data(name="input", shape=[40, 40], dtype="float32") res1 = paddle.nn.functional.dropout( x=input, p=0.0, training=False, mode='upscale_in_train' ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py index ac043fc5f47..fc03a6042f8 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_elementwise_add_op_mlu.py @@ -402,8 +402,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -417,8 +417,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py index ed03ac0d3ad..7b241075e47 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py @@ -271,10 +271,10 @@ class TestFillConstantAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -446,7 +446,7 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. 
def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.tensor.fill_constant( @@ -456,7 +456,7 @@ class TestFillConstantOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.tensor.fill_constant( diff --git a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py index d4c5e966570..0ce65e30bcc 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_gather_op_mlu.py @@ -129,10 +129,10 @@ class TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int8', name='x') - axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + axis = paddle.static.data(shape=[1], dtype='float32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -160,9 +160,9 @@ class TestGathertError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): shape = [8, 9, 6] - x = fluid.data(shape=shape, dtype='int8', name='x') - index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + index = paddle.static.data(shape=shape, dtype='int32', name='mask') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py index 6575b0decd4..13046487ba6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_hard_sigmoid_op_mlu.py @@ -161,7 +161,7 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): paddle.enable_static() with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -179,12 +179,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py index 82d22af8933..3a35457a74b 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py @@ -140,7 +140,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): paddle.enable_static() # test static api with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = logsoftmax(x) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -174,7 +174,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): x = x.astype(dtype) ref_out = np.apply_along_axis(ref_log_softmax, axis, x) with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = F.log_softmax(x, axis, dtype) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -194,10 +194,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='X1', shape=[100], dtype='int32') + x = paddle.static.data(name='X1', shape=[100], dtype='int32') self.assertRaises(TypeError, F.log_softmax, x) - x = paddle.fluid.data(name='X2', shape=[100], dtype='float32') + x = paddle.static.data(name='X2', shape=[100], dtype='float32') self.assertRaises(TypeError, F.log_softmax, x, dtype='int32') paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py index 11e9dc86cd4..9c07c4984e4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_masked_select_op_mlu.py @@ -107,8 +107,8 @@ class TestMaskedSelectAPI(unittest.TestCase): def test_static_mode(self): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -132,9 +132,9 @@ class TestMaskedSelectError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') + mask_float = paddle.static.data( shape=shape, dtype='float32', name='mask_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py 
b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py index f1b142902b1..4c866e3f429 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_matmul_v2_op_mlu.py @@ -350,8 +350,8 @@ class TestMatMulV2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32") - input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32") + input_x = paddle.static.data(name="input_x", shape=[4, 3], dtype="float32") + input_y = paddle.static.data(name="input_y", shape=[3, 4], dtype="float32") result = paddle.matmul(input_x, input_y) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py index ecfb94e335b..e89c46041bb 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_meshgrid_op_mlu.py @@ -68,8 +68,8 @@ class TestMeshgridOp2(TestMeshgridOp): class TestMeshgridOp3(unittest.TestCase): def test_api(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -104,8 +104,8 @@ class TestMeshgridOp3(unittest.TestCase): class TestMeshgridOp4(unittest.TestCase): def test_list_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -141,8 +141,8 @@ class TestMeshgridOp4(unittest.TestCase): class TestMeshgridOp5(unittest.TestCase): def test_tuple_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, diff --git a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py index cb13f305cae..fcf0c399fe9 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_scatter_op_mlu.py @@ -127,9 +127,9 @@ class TestScatterAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 2], dtype="float32") - index = fluid.data(name="index", shape=[4], dtype="int64") - updates = fluid.data(name="updates", shape=[4, 2], dtype="float32") + input = paddle.static.data(name="input", shape=[3, 2], dtype="float32") + index = paddle.static.data(name="index", shape=[4], dtype="int64") + updates = paddle.static.data(name="updates", shape=[4, 2], dtype="float32") result = self.scatter(input, index, updates, False) input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py index a0cd8eba6dc..bd5fe04a620 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py +++ 
b/python/paddle/fluid/tests/unittests/mlu/test_size_op_mlu.py @@ -67,8 +67,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py index 8c1ebbe01e5..0fb29029ede 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_softmax_op_mlu.py @@ -132,7 +132,7 @@ class TestSoftmaxAPI(unittest.TestCase): def test_static_check(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, 'float32') + x = paddle.static.data('X', self.x_np.shape, 'float32') out1 = self.softmax(x) m = paddle.nn.Softmax() out2 = m(x) @@ -173,12 +173,12 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32 - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) self.softmax(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py index 60cd1e27c70..75dc659d0b3 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py @@ -256,7 +256,7 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10], dtype="float32", name="data") + data = paddle.static.data(shape=[10], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -266,7 +266,7 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10, 5], dtype="float32", name="data") + data = paddle.static.data(shape=[10, 5], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -276,7 +276,7 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[1, 5], dtype="float32", name="data") + data = paddle.static.data(shape=[1, 5], dtype="float32", name="data") data_t = paddle.t(data) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -311,7 +311,7 @@ class TestTAPI(unittest.TestCase): def test_errors(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name='x', shape=[10, 5, 3], dtype='float32') + x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float32') def test_x_dimension_check(): paddle.t(x) diff --git 
a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py index 0f5b98c9bc9..66271aa6715 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tril_triu_op_mlu.py @@ -81,7 +81,7 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) + data = paddle.static.data(shape=Xshape, dtype='float64', name=cls_name) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -146,7 +146,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = fluid.MLUPlace(0) @@ -183,7 +183,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') triu_out = paddle.triu(x) place = fluid.MLUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py index 353fd250a5e..08944b97e75 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_batch_norm_op_npu.py @@ -588,7 +588,7 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py index d9e968964af..da7df48ecdf 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py @@ -30,14 +30,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) bce_loss = paddle.nn.loss.BCELoss( @@ -63,14 +63,14 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float32' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float32' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float32' ) res = 
paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py index 37dcf8465bc..9b5d14e0ff2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py @@ -137,9 +137,9 @@ class TestClipAPI(unittest.TestCase): paddle.enable_static() data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') - images = fluid.data(name='image', shape=data_shape, dtype='float32') - min = fluid.data(name='min', shape=[1], dtype='float32') - max = fluid.data(name='max', shape=[1], dtype='float32') + images = paddle.static.data(name='image', shape=data_shape, dtype='float32') + min = paddle.static.data(name='min', shape=[1], dtype='float32') + max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( fluid.NPUPlace(0) @@ -203,8 +203,8 @@ class TestClipAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name='x1', shape=[1], dtype="int16") - x2 = fluid.data(name='x2', shape=[1], dtype="int8") + x1 = paddle.static.data(name='x1', shape=[1], dtype="int16") + x2 = paddle.static.data(name='x2', shape=[1], dtype="int8") self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8) self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py index 91b85086467..6ed96bcfbff 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_dropout_op_npu.py @@ -215,7 +215,7 @@ class TestDropoutAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data(name="input", shape=[40, 40], dtype="float32") res1 = paddle.nn.functional.dropout( x=input, p=0.0, training=False, mode='upscale_in_train' ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py index 2e35a398bb7..f009b43bf5f 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_add_op_npu.py @@ -510,8 +510,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -525,8 +525,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py index efb81fbad6e..46eb7f90825 100644 --- 
a/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_elementwise_mod_op_npu.py @@ -144,8 +144,8 @@ class TestRemainderOp(unittest.TestCase): def test_name(self): paddle.set_device('npu:0') with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = paddle.remainder(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py index 3ce9042d75f..e2a5229f18a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py @@ -101,8 +101,8 @@ class API_TestGather(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data('x', shape=[-1, 2], dtype='float32') - index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') + x = paddle.static.data('x', shape=[-1, 2], dtype='float32') + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') out = paddle.gather(x, index) place = paddle.NPUPlace(0) exe = paddle.static.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py index 56430ee7c13..113b5e1e6a3 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py @@ -216,7 +216,7 @@ class TestGroupNormOpFP16_With_NHWC(TestGroupNormOp): class TestGroupNormException(unittest.TestCase): # data_layout is not NHWC or NCHW def test_exception(self): - data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64") + data = paddle.static.data(name='data', shape=[None, 3, 3, 4], dtype="float64") def attr_data_format(): out = paddle.static.nn.group_norm( diff --git a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py index 55dc1e0a110..23c8fde4495 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_hard_sigmoid_op_npu.py @@ -122,7 +122,7 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -140,12 +140,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py index 5883ef7b567..6a27df3ad6e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_index_sample_op_npu.py @@ -160,8 +160,8 @@ class TestIndexSampleShape(unittest.TestCase): low=0, high=x_shape[1], size=index_shape ).astype(index_type) - x = fluid.data(name='x', shape=[-1, 5], dtype='float32') - index = fluid.data(name='index', shape=[-1, 3], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 5], dtype='float32') + index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py index 65174d8caf5..ce74c0d0df9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_instance_norm_op_npu.py @@ -61,7 +61,7 @@ class TestInstanceNorm(unittest.TestCase): def compute_v1(x_np): with program_guard(Program(), Program()): ins = paddle.nn.InstanceNorm(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = ins(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -70,7 +70,7 @@ class TestInstanceNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): ins = paddle.nn.InstanceNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype) y = ins(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py index 3a55d9973af..160dfdff178 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_kldiv_loss_op_npu.py @@ -137,8 +137,8 @@ class TestKLDivLossDygraph(unittest.TestCase): self.run_kl_loss('none') def test_kl_loss_static_api(self): - input = paddle.fluid.data(name='input', shape=[5, 20]) - label = paddle.fluid.data(name='label', shape=[5, 20]) + input = paddle.static.data(name='input', shape=[5, 20]) + label = paddle.static.data(name='label', shape=[5, 20]) pred_loss = paddle.nn.functional.kl_div(input, label) diff --git a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py index fc0c428983f..41b6d45620e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_log_softmax_op_npu.py @@ -124,7 +124,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase): logsoftmax = paddle.nn.LogSoftmax(axis) # test static api with paddle.static.program_guard(paddle.static.Program()): - x = 
paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = logsoftmax(x) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) @@ -158,7 +158,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase): x = x.astype(dtype) ref_out = np.apply_along_axis(ref_log_softmax, axis, x) with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = F.log_softmax(x, axis, dtype) exe = paddle.static.Executor(self.place) out = exe.run(feed={'x': self.x}, fetch_list=[y]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py index 8943b5ba95f..379b13721f7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_masked_select_op_npu.py @@ -116,8 +116,8 @@ class TestMaskedSelectAPI(unittest.TestCase): def test_static_mode(self): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') np_x = np.random.random(shape).astype('float32') np_mask = np.array(np.random.randint(2, size=shape, dtype=bool)) @@ -141,9 +141,9 @@ class TestMaskedSelectError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask') - mask_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + mask = paddle.static.data(shape=shape, dtype='bool', name='mask') + mask_float = paddle.static.data( shape=shape, dtype='float32', name='mask_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py index 91883824cf5..06b5ff54cce 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_matmulv2_op_npu.py @@ -337,8 +337,8 @@ class TestMatMulV2API(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32") - input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32") + input_x = paddle.static.data(name="input_x", shape=[4, 3], dtype="float32") + input_y = paddle.static.data(name="input_y", shape=[3, 4], dtype="float32") result = paddle.matmul(input_x, input_y) diff --git a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py index 8af8f899244..c153dece3c7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_meshgrid_op_npu.py @@ -92,8 +92,8 @@ class TestMeshgridOp2(TestMeshgridOp): class TestMeshgridOp3(unittest.TestCase): def test_api(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -129,8 +129,8 @@ 
class TestMeshgridOp3(unittest.TestCase): class TestMeshgridOp4(unittest.TestCase): def test_list_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, @@ -166,8 +166,8 @@ class TestMeshgridOp4(unittest.TestCase): class TestMeshgridOp5(unittest.TestCase): def test_tuple_input(self): - x = fluid.data(shape=[100], dtype='int32', name='x') - y = fluid.data(shape=[200], dtype='int32', name='y') + x = paddle.static.data(shape=[100], dtype='int32', name='x') + y = paddle.static.data(shape=[200], dtype='int32', name='y') input_1 = np.random.randint( 0, diff --git a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py index ccff4ffd0cf..f8b55aedd5c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_multinomial_op_npu.py @@ -163,7 +163,7 @@ class TestMultinomialApi(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - x = fluid.data('x', shape=[4], dtype='float32') + x = paddle.static.data('x', shape=[4], dtype='float32') out = paddle.multinomial(x, num_samples=100000, replacement=True) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py index ca6979a7615..c0894edc51a 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_norm_op_npu.py @@ -106,7 +106,7 @@ class API_NormTest(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_norm_x_type(): - data = fluid.data(name="x", shape=[3, 3], dtype="float64") + data = paddle.static.data(name="x", shape=[3, 3], dtype="float64") out = paddle.nn.functional.normalize(data) self.assertRaises(TypeError, test_norm_x_type) diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py index 5d0d25e1f20..d8f68c4df2c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad3d_op_npu.py @@ -176,7 +176,7 @@ class TestPadAPI(unittest.TestCase): mode = "constant" value = 0 input_data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) result1 = F.pad( x=x, pad=pad, value=value, mode=mode, data_format="NCDHW" ) @@ -454,7 +454,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_value(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], value=1, mode='constant') place = paddle.NPUPlace() exe = Executor(place) @@ -463,7 +463,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_1(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='reflect') place = paddle.NPUPlace() exe = 
Executor(place) @@ -472,7 +472,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_2(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='replicate') place = paddle.NPUPlace() exe = Executor(place) @@ -481,7 +481,7 @@ class TestPad3dOpNpuError(unittest.TestCase): def test_mode_3(): input_shape = (1, 2, 3, 4, 5) data = np.random.rand(*input_shape).astype(np.float32) - x = paddle.fluid.data(name="x", shape=input_shape) + x = paddle.static.data(name="x", shape=input_shape) y = F.pad(x, pad=[1, 1, 1, 1, 1, 1], mode='circular') place = paddle.CPUPlace() exe = Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py index c02d6012e41..a6bbaf6f8de 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_pad_op_npu.py @@ -122,7 +122,7 @@ class TestPadOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) - data = fluid.data(name='data', shape=[4], dtype='float16') + data = paddle.static.data(name='data', shape=[4], dtype='float16') paddle.nn.functional.pad(x=data, pad=[0, 1]) diff --git a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py index 36bf75fafed..88302c427f7 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_run_program_op_npu.py @@ -289,7 +289,7 @@ class TestRunProgramOpWithFC(RunProgramNPUOpTest): def build_model(self): # 1. 
simple model - img = fluid.data( + img = paddle.static.data( name=self.input_names['X'][0], shape=[None, 1, 28, 28], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py index 0b1d87b3dfa..c955c828174 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py @@ -96,8 +96,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py index f2bb48bad7e..853ccbc2101 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_take_along_axis_op_npu.py @@ -89,8 +89,8 @@ class TestTakeAlongAxisAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) - index = paddle.fluid.data('Index', self.index_shape, "int64") + x = paddle.static.data('X', self.shape) + index = paddle.static.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place) res = exe.run( diff --git a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py index 97385ca04d7..92d52bc604e 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py @@ -82,7 +82,7 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float32', name=cls_name) + data = paddle.static.data(shape=Xshape, dtype='float32', name=cls_name) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -147,7 +147,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = fluid.NPUPlace(0) @@ -184,7 +184,7 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') triu_out = paddle.triu(x) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py index 07fb19031c0..571280881b9 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py +++ 
b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py @@ -104,17 +104,17 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp): class TestUpdateLossScalingLayer(unittest.TestCase): def loss_scaling_check(self, use_npu=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data(name="found_inf", shape=[1], dtype='bool') + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) @@ -175,17 +175,17 @@ class TestUpdateLossScalingLayer(unittest.TestCase): assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v)) def loss_scaling_check_inf(self, use_npu=True, scope=fluid.Scope()): - a = fluid.data(name="a", shape=[1024, 1024], dtype='float32') - b = fluid.data(name="b", shape=[512, 128], dtype='float32') + a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32') + b = paddle.static.data(name="b", shape=[512, 128], dtype='float32') x = [a, b] - found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool') - prev_loss_scaling = fluid.data( + found_inf = paddle.static.data(name="found_inf", shape=[1], dtype='bool') + prev_loss_scaling = paddle.static.data( name="prev_loss_scaling", shape=[1], dtype='float32' ) - num_good_steps = fluid.data( + num_good_steps = paddle.static.data( name="num_good_steps", shape=[1], dtype='int32' ) - num_bad_steps = fluid.data( + num_bad_steps = paddle.static.data( name="num_bad_steps", shape=[1], dtype='int32' ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py index 5e9baa696fc..bc4d9583dde 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py @@ -89,11 +89,11 @@ class TestNPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - cond = fluid.data( + cond = paddle.static.data( name='cond', shape=self.shape, dtype='bool' ) - x = fluid.data(name='x', shape=self.shape, dtype='float32') - y = fluid.data(name='y', shape=self.shape, dtype='float32') + x = paddle.static.data(name='x', shape=self.shape, dtype='float32') + y = paddle.static.data(name='y', shape=self.shape, dtype='float32') x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py index 646ab79fe33..a7edbc67496 100644 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_cells_static.py @@ -73,12 +73,12 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], 
dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -105,7 +105,7 @@ class TestSimpleRNNCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), @@ -176,12 +176,12 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -208,7 +208,7 @@ class TestGRUCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), @@ -280,17 +280,17 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [-1, 32], dtype=paddle.framework.get_default_dtype(), ) - init_c = paddle.fluid.data( + init_c = paddle.static.data( "init_c", [-1, 32], dtype=paddle.framework.get_default_dtype(), @@ -318,7 +318,7 @@ class TestLSTMCell(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, 16], dtype=paddle.framework.get_default_dtype(), diff --git a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py index d8b6ba12559..da04ca9e1bd 100755 --- a/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py +++ b/python/paddle/fluid/tests/unittests/rnn/test_rnn_nets_static.py @@ -88,12 +88,12 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -123,7 +123,7 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -155,12 +155,12 @@ class TestSimpleRNN(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) @@ -245,12 +245,12 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + 
x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -280,7 +280,7 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -312,12 +312,12 @@ class TestGRU(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) @@ -401,17 +401,17 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - init_h = paddle.fluid.data( + init_h = paddle.static.data( "init_h", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), ) - init_c = paddle.fluid.data( + init_c = paddle.static.data( "init_c", [2 * self.num_directions, -1, 32], dtype=paddle.framework.get_default_dtype(), @@ -442,7 +442,7 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), @@ -475,12 +475,12 @@ class TestLSTM(unittest.TestCase): with paddle.fluid.unique_name.guard(): with paddle.static.program_guard(mp, sp): - x_data = paddle.fluid.data( + x_data = paddle.static.data( "input", [-1, -1, 16], dtype=paddle.framework.get_default_dtype(), ) - seq_len = paddle.fluid.data("seq_len", [-1], dtype="int64") + seq_len = paddle.static.data("seq_len", [-1], dtype="int64") mask = paddle.static.nn.sequence_lod.sequence_mask( seq_len, dtype=paddle.get_default_dtype() ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py index 09f216fd5ef..bf10e2c1883 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py @@ -21,7 +21,6 @@ sys.path.append("../") from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core @@ -185,7 +184,9 @@ class TestSequencePadOpError(unittest.TestCase): self.assertRaises(TypeError, test_dtype) def test_length_dtype(self): - x = fluid.data(name='x', shape=[10, 5], dtype='float32', lod_level=1) + x = paddle.static.data( + name='x', shape=[10, 5], dtype='float32', lod_level=1 + ) pad_value = paddle.assign(np.array([0.0], dtype=np.float32)) out, length = paddle.static.nn.sequence_lod.sequence_pad( diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py index 37048d7cd25..dca8c28e4d3 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py +++ 
b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_attention.py @@ -111,7 +111,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, hidden], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py index 4fca47635a1..5a4c2cc5697 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_feedforward.py @@ -102,7 +102,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, IN_SIZE], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py index 9c863d6d3be..8f14f26478c 100644 --- a/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py +++ b/python/paddle/fluid/tests/unittests/static_model_parallel_fused_multi_transformer.py @@ -144,7 +144,7 @@ class TestModelParallel(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data seq_len = 2 - data_in = fluid.data( + data_in = paddle.static.data( name='data_in', shape=[batch_size, seq_len, hidden], dtype=DTYPE ) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 64e6ea606fb..ed9bea13b8f 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -181,7 +181,7 @@ class TestExpm1API(unittest.TestCase): def run(place): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - X = paddle.fluid.data('X', self.shape, dtype=self.dtype) + X = paddle.static.data('X', self.shape, dtype=self.dtype) out = paddle.expm1(X) exe = paddle.static.Executor(place) res = exe.run(feed={'X': self.x}) @@ -203,7 +203,7 @@ class TestExpm1API(unittest.TestCase): def test_errors(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - X = paddle.fluid.data('X', self.shape, dtype='int32') + X = paddle.static.data('X', self.shape, dtype='int32') self.assertRaises(TypeError, paddle.expm1, X) # The input dtype must be float16, float32, float64. @@ -357,7 +357,7 @@ class TestSiluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17]) out1 = F.silu(x) m = paddle.nn.Silu() out2 = m(x) @@ -382,12 +382,12 @@ class TestSiluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.silu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.silu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.silu(x_fp16) @@ -432,7 +432,7 @@ class TestLogSigmoidAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17]) out1 = F.log_sigmoid(x) m = paddle.nn.LogSigmoid() out2 = m(x) @@ -457,12 +457,12 @@ class TestLogSigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.log_sigmoid, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.log_sigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.log_sigmoid(x_fp16) @@ -518,7 +518,7 @@ class TestTanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], self.dtype) + x = paddle.static.data('X', [10, 12], self.dtype) out1 = self.tanh(x) th = paddle.nn.Tanh() out2 = th(x) @@ -544,12 +544,12 @@ class TestTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.tanh, 1) # The input dtype must be float16, float32. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, self.tanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) self.tanh(x_fp16) @@ -688,12 +688,12 @@ class TestSinhOpError(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.sinh, 1) # The input dtype must be float16, float32, float64. - x_int32 = fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.sinh, x_int32) # support the input dtype is float16 - x_fp16 = fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.sinh(x_fp16) @@ -779,12 +779,12 @@ class TestCoshOpError(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.cosh, 1) # The input dtype must be float16, float32, float64. - x_int32 = fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.cosh, x_int32) # support the input dtype is float16 - x_fp16 = fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.cosh(x_fp16) @@ -834,7 +834,7 @@ class TestTanhshrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.tanhshrink(x) tanhshrink = paddle.nn.Tanhshrink() out2 = tanhshrink(x) @@ -859,12 +859,12 @@ class TestTanhshrinkAPI(unittest.TestCase): # The input type must be Variable. 
self.assertRaises(TypeError, F.tanhshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.tanhshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.tanhshrink(x_fp16) @@ -932,7 +932,7 @@ class TestHardShrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardshrink(x) hd = paddle.nn.Hardshrink() out2 = hd(x) @@ -964,12 +964,12 @@ class TestHardShrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardshrink, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardshrink, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardshrink(x_fp16) @@ -997,7 +997,7 @@ class TestHardtanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardtanh(x) m = paddle.nn.Hardtanh() out2 = m(x) @@ -1029,12 +1029,12 @@ class TestHardtanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardtanh, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardtanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardtanh(x_fp16) @@ -1090,7 +1090,7 @@ class TestSoftshrinkAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softshrink(x, self.threshold) softshrink = paddle.nn.Softshrink(self.threshold) out2 = softshrink(x) @@ -1115,17 +1115,17 @@ class TestSoftshrinkAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softshrink, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softshrink, x_int32) # The threshold must be no less than zero - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[12, 10], dtype='float32' ) self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softshrink(x_fp16) @@ -1845,7 +1845,7 @@ class TestReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.relu(x) m = paddle.nn.ReLU() out2 = m(x) @@ -1871,12 +1871,12 @@ class TestReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.relu(x_fp16) @@ -1955,7 +1955,7 @@ class TestLeakyReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.leaky_relu(x) m = paddle.nn.LeakyReLU() out2 = m(x) @@ -1987,12 +1987,12 @@ class TestLeakyReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.leaky_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.leaky_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.leaky_relu(x_fp16) @@ -2092,7 +2092,7 @@ class TestGELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [11, 17]) + x = paddle.static.data('X', [11, 17], dtype="float32") out1 = F.gelu(x) m = paddle.nn.GELU() out2 = m(x) @@ -2124,12 +2124,12 @@ class TestGELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.gelu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[11, 17], dtype='int32' ) self.assertRaises(TypeError, F.gelu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[11, 17], dtype='float16' ) F.gelu(x_fp16) @@ -2214,7 +2214,7 @@ class TestRelu6API(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.relu6(x) relu6 = paddle.nn.ReLU6() out2 = relu6(x) @@ -2236,7 +2236,7 @@ class TestRelu6API(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.relu6(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -2249,12 +2249,12 @@ class TestRelu6API(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.relu6, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.relu6, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.relu6(x_fp16) @@ -2361,7 +2361,7 @@ class TestHardswishAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardswish(x) m = paddle.nn.Hardswish() out2 = m(x) @@ -2383,7 +2383,7 @@ class TestHardswishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardswish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -2400,12 +2400,12 @@ class TestHardswishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardswish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardswish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardswish(x_fp16) @@ -2501,7 +2501,7 @@ class TestELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.elu(x) m = paddle.nn.ELU() out2 = m(x) @@ -2535,12 +2535,12 @@ class TestELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.elu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.elu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.elu(x_fp16) @@ -2608,7 +2608,7 @@ class TestCELUAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.celu(x, 1.5) m = paddle.nn.CELU(1.5) out2 = m(x) @@ -2642,17 +2642,17 @@ class TestCELUAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.celu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[10, 12], dtype='int32' ) self.assertRaises(TypeError, self.celu, x_int32) # The alpha must be not equal 0 - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[10, 12], dtype='float32' ) self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[10, 12], dtype='float16' ) self.celu(x_fp16) @@ -3164,7 +3164,7 @@ class TestSTanhAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12]) out = paddle.stanh(x, self.scale_a, self.scale_b) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3182,7 +3182,7 @@ class TestSTanhAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', [10, 12]) + x = paddle.static.data('X', [10, 12], dtype="float32") out = paddle.stanh(x, self.scale_a, self.scale_b) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3195,12 +3195,12 @@ class TestSTanhAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, paddle.stanh, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, paddle.stanh, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) paddle.stanh(x_fp16) @@ -3303,7 +3303,7 @@ class TestSoftplusAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softplus(x, self.beta, self.threshold) softplus = paddle.nn.Softplus(self.beta, self.threshold) out2 = softplus(x) @@ -3328,12 +3328,12 @@ class TestSoftplusAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softplus, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softplus, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softplus(x_fp16) @@ -3386,7 +3386,7 @@ class TestSoftsignAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softsign(x) softsign = paddle.nn.Softsign() out2 = softsign(x) @@ -3411,12 +3411,12 @@ class TestSoftsignAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.softsign, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.softsign, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.softsign(x_fp16) @@ -3474,7 +3474,7 @@ class TestThresholdedReluAPI(unittest.TestCase): def test_static_api(self): with paddle_static_guard(): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.thresholded_relu(x, self.threshold) thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold) out2 = thresholded_relu(x) @@ -3499,12 +3499,12 @@ class TestThresholdedReluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.thresholded_relu, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.thresholded_relu, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.thresholded_relu(x_fp16) @@ -3597,13 +3597,14 @@ class TestHardsigmoidAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x, slope=0.2) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) + paddle.disable_static(self.place) x = paddle.to_tensor(self.x_np) out = paddle.nn.functional.hardsigmoid(x, slope=0.2) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) @@ -3614,12 +3615,12 @@ class TestHardsigmoidAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.hardsigmoid, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.hardsigmoid(x_fp16) @@ -3697,7 +3698,7 @@ class TestSwishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.swish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3710,12 +3711,12 @@ class TestSwishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.swish, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.swish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.swish(x_fp16) @@ -3794,7 +3795,7 @@ class TestMishAPI(unittest.TestCase): def test_fluid_api(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.mish(x) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -3807,12 +3808,12 @@ class TestMishAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.mish, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.mish, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.mish(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index cd40d401c45..1b1a01183ab 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -656,7 +656,7 @@ class TestAdamOpV2(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -982,8 +982,8 @@ class TestAdamOptimizer(unittest.TestCase): trainable=True, ) with fluid.program_guard(main): - x = fluid.data(name='x', shape=[None, 13], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, weight_attr=weight_attr) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py index bb863ad73e4..cf9640988f1 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py +++ b/python/paddle/fluid/tests/unittests/test_adam_optimizer_fp32_fp64.py @@ -30,8 +30,8 @@ def main_test_func(place, dtype): startup = fluid.Program() with fluid.program_guard(main, startup): with fluid.scope_guard(fluid.Scope()): - x = fluid.data(name='x', shape=[None, 13], dtype=dtype) - y = fluid.data(name='y', shape=[None, 1], dtype=dtype) + x = paddle.static.data(name='x', shape=[None, 13], dtype=dtype) + y = paddle.static.data(name='y', shape=[None, 1], dtype=dtype) y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_adamax_api.py b/python/paddle/fluid/tests/unittests/test_adamax_api.py index 9aeeb9e0d4b..30eca5d305f 100644 --- a/python/paddle/fluid/tests/unittests/test_adamax_api.py +++ b/python/paddle/fluid/tests/unittests/test_adamax_api.py @@ -45,7 +45,7 @@ class TestAdamaxAPI(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) beta1 = 0.85 diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index ead0a00ac11..4037659b08c 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -252,7 +252,7 @@ class TestAdamWOp(unittest.TestCase): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - data = fluid.data(name="data", shape=shape) + data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -767,8 +767,12 
@@ class TestAdamWOpLayerwiseLR(TestAdamWOp): startup = fluid.Program() with fluid.program_guard(train_prog, startup): with fluid.unique_name.guard(): - x = fluid.data(name='x', shape=[None, 10], dtype='float32') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data( + name='x', shape=[None, 10], dtype='float32' + ) + y = paddle.static.data( + name='y', shape=[None, 1], dtype='float32' + ) weight_attr1 = paddle.framework.ParamAttr(name="linear_0.w_0") bias_attr1 = paddle.framework.ParamAttr( diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py index ab76a61017b..de621bd2fb2 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool1d.py @@ -108,7 +108,9 @@ class TestPool1D_API(unittest.TestCase): def check_adaptive_avg_static_results(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 32], dtype="float32" + ) result = F.adaptive_avg_pool1d(input, output_size=16) input_np = np.random.random([2, 3, 32]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py index f2dccd4d63b..1566a4eb4ad 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py @@ -121,7 +121,9 @@ class TestAdaptiveAvgPool2DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_avg_pool2d( x=x, output_size=[3, 3] @@ -230,7 +232,9 @@ class TestAdaptiveAvgPool2DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2D(output_size=[3, 3]) out_1 = adaptive_avg_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py index c1e6a886688..77d28e48489 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py @@ -141,7 +141,7 @@ class TestAdaptiveAvgPool3DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) @@ -255,7 +255,7 @@ class TestAdaptiveAvgPool3DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py index 6b249ea134d..ea2cd317d0f 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py +++ 
b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool1d.py @@ -95,7 +95,7 @@ class TestPool1D_API(unittest.TestCase): def check_adaptive_max_static_results(self, place): with paddle_static_guard(): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) result = F.adaptive_max_pool1d(input, output_size=16) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py index b828b7becda..52a2edd6f97 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py @@ -122,7 +122,9 @@ class TestAdaptiveMaxPool2DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) out_1 = paddle.nn.functional.adaptive_max_pool2d( x=x, output_size=[3, 3] @@ -225,7 +227,9 @@ class TestAdaptiveMaxPool2DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data(name="x", shape=[2, 3, 7, 7], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 7, 7], dtype="float32" + ) adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3]) out_1 = adaptive_max_pool(x=x) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index 521dacd6399..4e6cb9864f0 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -143,7 +143,7 @@ class TestAdaptiveMaxPool3DAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) @@ -248,7 +248,7 @@ class TestAdaptiveMaxPool3DClassAPI(unittest.TestCase): ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x = paddle.fluid.data( + x = paddle.static.data( name="x", shape=[2, 3, 5, 7, 7], dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py index 5cf3953cb84..d9fbc155083 100644 --- a/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py +++ b/python/paddle/fluid/tests/unittests/test_add_reader_dependency.py @@ -17,6 +17,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.layer_helper import LayerHelper @@ -47,7 +48,9 @@ class TestAddReaderDependency(unittest.TestCase): def run_main(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.Scope()): - tmp_in = fluid.data(name='tmp_in', dtype='float32', shape=[1]) + tmp_in = paddle.static.data( + name='tmp_in', dtype='float32', shape=[1] + ) loader = fluid.io.DataLoader.from_generator( feed_list=[tmp_in], capacity=16, @@ -62,7 +65,7 @@ class TestAddReaderDependency(unittest.TestCase): low=-1, high=1, size=[1] ).astype('float32'), - persistable_in = fluid.data( + persistable_in = paddle.static.data( name='persistable_in', dtype='float32', shape=[1] ) persistable_in.persistable = True diff 
--git a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py index 16a67d765f8..20114a5304d 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_grid_function.py +++ b/python/paddle/fluid/tests/unittests/test_affine_grid_function.py @@ -51,7 +51,7 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data( + theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) y_var = paddle.nn.functional.affine_grid( @@ -69,7 +69,7 @@ class AffineGridTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - theta_var = fluid.data( + theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) y_var = F.affine_grid( diff --git a/python/paddle/fluid/tests/unittests/test_allclose_layer.py b/python/paddle/fluid/tests/unittests/test_allclose_layer.py index 459c3e36626..b1d6e06db9e 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_layer.py @@ -24,8 +24,8 @@ paddle.enable_static() class TestAllcloseLayer(unittest.TestCase): def allclose_check(self, use_cuda, dtype='float32'): - a = fluid.data(name="a", shape=[2], dtype=dtype) - b = fluid.data(name="b", shape=[2], dtype=dtype) + a = paddle.static.data(name="a", shape=[2], dtype=dtype) + b = paddle.static.data(name="b", shape=[2], dtype=dtype) result = paddle.allclose( a, b, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan" diff --git a/python/paddle/fluid/tests/unittests/test_allclose_op.py b/python/paddle/fluid/tests/unittests/test_allclose_op.py index 94e30621ef1..ef5cc942da8 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_op.py @@ -135,8 +135,10 @@ class TestAllcloseError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='int32') - y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='int32') + y = paddle.static.data( + name='y', shape=[10, 10], dtype='float64' + ) result = paddle.allclose(x, y) self.assertRaises(TypeError, test_x_dtype) @@ -145,15 +147,17 @@ class TestAllcloseError(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32') + x = paddle.static.data( + name='x', shape=[10, 10], dtype='float64' + ) + y = paddle.static.data(name='y', shape=[10, 10], dtype='int32') result = paddle.allclose(x, y) self.assertRaises(TypeError, test_y_dtype) def test_attr(self): - x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64') - y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.static.data(name='y', shape=[10, 10], dtype='float64') def test_rtol(): result = paddle.allclose(x, y, rtol=True) diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index 2b993280af7..3d3341a5d68 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -392,7 +392,7 @@ class TestArgsort(unittest.TestCase): def test_api(self): with fluid.program_guard(fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=self.input_shape, dtype="float64" ) diff --git a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py index 917ef5606f7..31b73a02b78 100644 --- a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py +++ b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py @@ -26,8 +26,12 @@ class TestAscendTriggerOP(unittest.TestCase): program = fluid.Program() block = program.global_block() with fluid.program_guard(program): - x = fluid.data(name='x', shape=[1], dtype='int64', lod_level=0) - y = fluid.data(name='y', shape=[1], dtype='int64', lod_level=0) + x = paddle.static.data( + name='x', shape=[1], dtype='int64', lod_level=0 + ) + y = paddle.static.data( + name='y', shape=[1], dtype='int64', lod_level=0 + ) block.append_op( type="ascend_trigger", inputs={"FeedList": [x]}, diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index c610aeaa32d..5d35c82b9ae 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -76,7 +76,7 @@ class TestAssignOpWithLoDTensorArray(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program): - x = fluid.data(name='x', shape=[100, 10], dtype='float32') + x = paddle.static.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 @@ -129,7 +129,7 @@ class TestAssignOApi(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program): - x = fluid.data(name='x', shape=[100, 10], dtype='float32') + x = paddle.static.data(name='x', shape=[100, 10], dtype='float32') x.stop_gradient = False y = paddle.tensor.fill_constant( shape=[100, 10], dtype='float32', value=1 diff --git a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py index 13cb3eccf80..ea517e2f978 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_pos_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_pos_op.py @@ -105,8 +105,8 @@ class TestAssignPosAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('x', self.x.shape, dtype="int64") - cum_count = paddle.fluid.data( + x = paddle.static.data('x', self.x.shape, dtype="int64") + cum_count = paddle.static.data( 'cum_count', self.cum_count.shape, dtype="int64" ) out = utils._assign_pos(x, cum_count) diff --git a/python/paddle/fluid/tests/unittests/test_atan2_op.py b/python/paddle/fluid/tests/unittests/test_atan2_op.py index 7dd3ceaca8b..2e6272665f5 100644 --- a/python/paddle/fluid/tests/unittests/test_atan2_op.py +++ b/python/paddle/fluid/tests/unittests/test_atan2_op.py @@ -103,8 +103,8 @@ class TestAtan2API(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - X1 = paddle.fluid.data('X1', self.shape, dtype=self.dtype) - X2 = paddle.fluid.data('X2', self.shape, dtype=self.dtype) + X1 = paddle.static.data('X1', self.shape, dtype=self.dtype) + X2 = paddle.static.data('X2', self.shape, dtype=self.dtype) out 
= paddle.atan2(X1, X2) exe = paddle.static.Executor(place) res = exe.run(feed={'X1': self.x1, 'X2': self.x2}) @@ -163,8 +163,8 @@ class TestAtan2Error(unittest.TestCase): paddle.enable_static() def test_mismatch_numel(): - X = paddle.fluid.data('X', (1,), dtype=np.float64) - Y = paddle.fluid.data('Y', (0,), dtype=np.float64) + X = paddle.static.data('X', (1,), dtype=np.float64) + Y = paddle.static.data('Y', (0,), dtype=np.float64) out = paddle.atan2(X, Y) self.assertRaises(ValueError, test_mismatch_numel) diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py index 428e6f8ec76..86057cbd083 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_op.py @@ -142,8 +142,12 @@ class TestAucOpError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): def test_type1(): - data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int") - label1 = fluid.data(name="label1", shape=[-1], dtype="int") + data1 = paddle.static.data( + name="input1", shape=[-1, 2], dtype="int" + ) + label1 = paddle.static.data( + name="label1", shape=[-1], dtype="int" + ) ins_tag_w1 = paddle.static.data( name="label1", shape=[-1], dtype="int" ) @@ -154,10 +158,12 @@ class TestAucOpError(unittest.TestCase): self.assertRaises(TypeError, test_type1) def test_type2(): - data2 = fluid.data( + data2 = paddle.static.data( name="input2", shape=[-1, 2], dtype="float32" ) - label2 = fluid.data(name="label2", shape=[-1], dtype="float32") + label2 = paddle.static.data( + name="label2", shape=[-1], dtype="float32" + ) result2 = paddle.static.auc(input=data2, label=label2) self.assertRaises(TypeError, test_type2) diff --git a/python/paddle/fluid/tests/unittests/test_backward.py b/python/paddle/fluid/tests/unittests/test_backward.py index 41fa23658c4..9d5cdbcf542 100644 --- a/python/paddle/fluid/tests/unittests/test_backward.py +++ b/python/paddle/fluid/tests/unittests/test_backward.py @@ -226,10 +226,16 @@ class SimpleNet(BackwardNet): def build_model(self): # stop_gradient = True in input - x = fluid.data(name='x_no_grad', shape=self.shape, dtype='int64') - x2 = fluid.data(name='x2_no_grad', shape=self.shape, dtype='int64') - x3 = fluid.data(name='x3_no_grad', shape=self.shape, dtype='int64') - label = fluid.data( + x = paddle.static.data( + name='x_no_grad', shape=self.shape, dtype='int64' + ) + x2 = paddle.static.data( + name='x2_no_grad', shape=self.shape, dtype='int64' + ) + x3 = paddle.static.data( + name='x3_no_grad', shape=self.shape, dtype='int64' + ) + label = paddle.static.data( name='label_no_grad', shape=[self.shape[0], 1], dtype='float32' ) # shared layer, the grad of 'w2v' will be summed and renamed. 
@@ -283,7 +289,7 @@ class TestSimpleNet(TestBackward): class TestGradientsError(unittest.TestCase): def test_error(self): - x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32') x.stop_gradient = False conv = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False) y = F.relu(conv) @@ -309,7 +315,9 @@ class TestSimpleNetWithErrorParamList(TestBackward): with self.assertRaises(TypeError): self._check_error_param_list(self.net, "test") # The type of parameter_list's member must be Variable or str - test = fluid.data(name='test', shape=[None, 90], dtype='float32') + test = paddle.static.data( + name='test', shape=[None, 90], dtype='float32' + ) with self.assertRaises(TypeError): self._check_error_param_list(self.net, [test, "test", 3]) @@ -322,15 +330,17 @@ class TestSimpleNetWithErrorNoGradSet(TestBackward): with self.assertRaises(TypeError): self._check_error_no_grad_set(self.net, "test") # The type of no_grad_set's member must be Variable or str - test = fluid.data(name='test', shape=[None, 90], dtype='float32') + test = paddle.static.data( + name='test', shape=[None, 90], dtype='float32' + ) with self.assertRaises(TypeError): self._check_error_no_grad_set(self.net, [test, "test", 3]) class TestAppendBackwardWithError(unittest.TestCase): def build_net(self): - x = fluid.data(name='x', shape=[None, 13], dtype='int64') - y = fluid.data(name='y', shape=[None, 1], dtype='float32') + x = paddle.static.data(name='x', shape=[None, 13], dtype='int64') + y = paddle.static.data(name='y', shape=[None, 1], dtype='float32') x_emb = paddle.static.nn.embedding(x, size=[100, 256]) y_predict = paddle.static.nn.fc(x=x_emb, size=1, name='my_fc') loss = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 9d3c363dc04..b438fc5293e 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -829,7 +829,9 @@ class TestDygraphBatchNormTrainableStats(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -846,7 +848,7 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase): with program_guard(Program(), Program()): paddle.enable_static() x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - x = fluid.data(name='x', shape=x.shape, dtype=x.dtype) + x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) # Set this FLAG, the BatchNorm API will pass "reserve_space" argument into batch_norm op. 
os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1' batch_norm = paddle.nn.BatchNorm(7, data_layout="NHWC") diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index d6127ff5dd7..c3d6eba112b 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -226,7 +226,9 @@ class TestBatchNorm(unittest.TestCase): is_test=is_test, trainable_statistics=trainable_statistics, ) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] @@ -235,7 +237,9 @@ class TestBatchNorm(unittest.TestCase): def compute_v2(x_np): with program_guard(Program(), Program()): bn = paddle.nn.BatchNorm2D(shape[1]) - x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype) + x = paddle.static.data( + name='x', shape=x_np.shape, dtype=x_np.dtype + ) y = bn(x) exe.run(fluid.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] diff --git a/python/paddle/fluid/tests/unittests/test_bce_loss.py b/python/paddle/fluid/tests/unittests/test_bce_loss.py index 0acb64502b9..84dfd8f8ef4 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_loss.py @@ -27,14 +27,14 @@ def test_static_layer( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) bce_loss = paddle.nn.loss.BCELoss( @@ -60,14 +60,14 @@ def test_static_functional( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - input = paddle.fluid.data( + input = paddle.static.data( name='input', shape=input_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) res = paddle.nn.functional.binary_cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py index 91c818eba75..42f0365635d 100644 --- a/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py +++ b/python/paddle/fluid/tests/unittests/test_bce_with_logits_loss.py @@ -52,10 +52,10 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) feed_dict = {"logit": logit_np, "label": label_np} @@ -63,12 +63,12 @@ def test_static( pos_weight = None weight = None if pos_weight_np is not None: - pos_weight = paddle.fluid.data( + pos_weight = 
paddle.static.data( name='pos_weight', shape=pos_weight_np.shape, dtype='float64' ) feed_dict["pos_weight"] = pos_weight_np if weight_np is not None: - weight = paddle.fluid.data( + weight = paddle.static.data( name='weight', shape=weight_np.shape, dtype='float64' ) feed_dict["weight"] = weight_np diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py index 7372755936d..a914459fa05 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_op.py @@ -296,16 +296,18 @@ class TestBicubicInterpOpAPI(unittest.TestCase): with fluid.program_guard(prog, startup_prog): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) - dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data( + dim = paddle.static.data(name="dim", shape=[1], dtype="int32") + shape_tensor = paddle.static.data( name="shape_tensor", shape=[2], dtype="int32" ) - actual_size = fluid.data( + actual_size = paddle.static.data( name="actual_size", shape=[2], dtype="int32" ) - scale_tensor = fluid.data( + scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32" ) @@ -372,37 +374,45 @@ class TestBicubicOpError(unittest.TestCase): def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12, 12], mode='UNKONWN', align_corners=False ) def test_input_shape(): - x = fluid.data(name="x", shape=[2], dtype="float32") + x = paddle.static.data(name="x", shape=[2], dtype="float32") out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False ) def test_size_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12], mode='BICUBIC', align_corners=False ) def test_align_corcers(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) interpolate(x, size=[12, 12], mode='BICUBIC', align_corners=3) def test_out_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12], mode='bicubic', align_corners=False ) def test_attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -423,7 +433,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_value(): # the scale must be greater than zero. - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -434,7 +446,7 @@ class TestBicubicOpError(unittest.TestCase): def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -446,7 +458,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_type(): # the scale must be greater than zero. 
- x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) scale = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() ) @@ -459,7 +473,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_mode(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -470,7 +486,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_outshape_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, diff --git a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py index 30b6cb684d6..afbbe26a6ad 100644 --- a/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_bicubic_interp_v2_op.py @@ -374,16 +374,18 @@ class TestBicubicInterpOpAPI(unittest.TestCase): with fluid.program_guard(prog, startup_prog): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) - dim = fluid.data(name="dim", shape=[1], dtype="int32") - shape_tensor = fluid.data( + dim = paddle.static.data(name="dim", shape=[1], dtype="int32") + shape_tensor = paddle.static.data( name="shape_tensor", shape=[2], dtype="int32" ) - actual_size = fluid.data( + actual_size = paddle.static.data( name="actual_size", shape=[2], dtype="int32" ) - scale_tensor = fluid.data( + scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32" ) @@ -455,29 +457,35 @@ class TestBicubicOpError(unittest.TestCase): def test_mode_type(): # mode must be "BILINEAR" "TRILINEAR" "NEAREST" "BICUBIC" - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=[12, 12], mode='UNKONWN', align_corners=False ) def test_input_shape(): - x = fluid.data(name="x", shape=[2], dtype="float32") + x = paddle.static.data(name="x", shape=[2], dtype="float32") out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False ) def test_align_corcers(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) interpolate(x, size=[12, 12], mode='BICUBIC', align_corners=3) def test_out_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate(x, size=[12], mode='bicubic', align_corners=False) def test_attr_data_format(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -495,7 +503,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_value(): # the scale must be greater than zero. 
- x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -506,7 +516,7 @@ class TestBicubicOpError(unittest.TestCase): def test_attr_5D_input(): # for 5-D input, data_format only can be NCDHW or NDHWC - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 6, 9, 4], dtype="float32" ) out = interpolate( @@ -515,7 +525,9 @@ class TestBicubicOpError(unittest.TestCase): def test_scale_type(): # the scale must be greater than zero. - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) scale = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() ) @@ -528,7 +540,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_mode(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -539,7 +553,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_outshape_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -549,7 +565,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_align_corners_and_nearest(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -559,7 +577,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_scale_shape(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -569,7 +589,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_scale_value_1(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -579,7 +601,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_and_scale(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size=None, @@ -589,7 +613,9 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_and_scale2(): - x = fluid.data(name="input", shape=[2, 3, 6, 9, 4], dtype="float32") + x = paddle.static.data( + name="input", shape=[2, 3, 6, 9, 4], dtype="float32" + ) out = interpolate( x, size=[2, 2, 2], @@ -599,27 +625,37 @@ class TestBicubicOpError(unittest.TestCase): ) def test_size_type(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate( x, size={2, 2}, mode='bicubic', align_corners=False ) def test_size_length(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) out = interpolate(x, size=[2], mode='bicubic', align_corners=False) def test_size_tensor_ndim(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) size = paddle.to_tensor(np.array([[2, 2]])) out = interpolate(x, size=size, mode='bicubic', align_corners=False) def 
test_size_tensor_length(): - x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 3, 6, 6], dtype="float32" + ) size = paddle.to_tensor(np.array([2])) out = interpolate(x, size=size, mode='bicubic', align_corners=False) def test_input_shape_1(): - x = fluid.data(name="x", shape=[2, 1, 0, 0], dtype="float32") + x = paddle.static.data( + name="x", shape=[2, 1, 0, 0], dtype="float32" + ) out = interpolate( x, size=[3, 3], mode="bicubic", align_corners=False ) diff --git a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py index ca919eebf6b..3ac5058c910 100644 --- a/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilateral_slice_op.py @@ -193,13 +193,13 @@ class TestBilateralSliceOp1(TestBilateralSliceOp): class TestBilateralSliceApi(unittest.TestCase): def test_api(self): with paddle_static_guard(): - x = paddle.fluid.data( + x = paddle.static.data( name='x', shape=[None, 3, 25, 15], dtype='float32' ) - guide = paddle.fluid.data( + guide = paddle.static.data( name='guide', shape=[None, 25, 15], dtype='float32' ) - grid = paddle.fluid.data( + grid = paddle.static.data( name='grid', shape=[None, None, 8, 5, 3], dtype='float32' ) paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_api.py b/python/paddle/fluid/tests/unittests/test_bilinear_api.py index 925a45f3bad..118d8cd3583 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_api.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_api.py @@ -32,8 +32,8 @@ class TestBilinearAPI(unittest.TestCase): place = core.CPUPlace() exe = fluid.Executor(place) - data1 = fluid.data(name='X1', shape=[5, 5], dtype='float32') - data2 = fluid.data(name='X2', shape=[5, 4], dtype='float32') + data1 = paddle.static.data(name='X1', shape=[5, 5], dtype='float32') + data2 = paddle.static.data(name='X2', shape=[5, 4], dtype='float32') layer1 = np.random.random((5, 5)).astype('float32') layer2 = np.random.random((5, 4)).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py index 27fd48b6532..b18db80ede9 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_tensor_product_op.py @@ -32,8 +32,12 @@ class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): ) self.assertRaises(TypeError, layer, x0) # the input dtype must be float32 or float64 - x1 = fluid.data(name='x1', shape=[-1, 5], dtype="float16") - x2 = fluid.data(name='x2', shape=[-1, 4], dtype="float32") + x1 = paddle.static.data( + name='x1', shape=[-1, 5], dtype="float16" + ) + x2 = paddle.static.data( + name='x2', shape=[-1, 4], dtype="float32" + ) self.assertRaises(TypeError, layer, x1, x2) # the dimensions of x and y must be 2 paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_bincount_op.py b/python/paddle/fluid/tests/unittests/test_bincount_op.py index 0e189e82c91..6585f8f5bd1 100644 --- a/python/paddle/fluid/tests/unittests/test_bincount_op.py +++ b/python/paddle/fluid/tests/unittests/test_bincount_op.py @@ -34,8 +34,10 @@ class TestBincountOpAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, 
startup_program): - inputs = fluid.data(name='input', dtype='int64', shape=[7]) - weights = fluid.data(name='weights', dtype='int64', shape=[7]) + inputs = paddle.static.data(name='input', dtype='int64', shape=[7]) + weights = paddle.static.data( + name='weights', dtype='int64', shape=[7] + ) output = paddle.bincount(inputs, weights=weights) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 38a1284f0ae..f244a5db0b7 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -86,7 +86,7 @@ class TestDoubleGrad(unittest.TestCase): class TestGradientWithPrune(unittest.TestCase): def test_prune(self): with paddle.fluid.scope_guard(paddle.static.Scope()): - x = fluid.data(name='x', shape=[3], dtype='float32') + x = paddle.static.data(name='x', shape=[3], dtype='float32') x.stop_gradient = False x1, x2, x3 = paddle.split(x, axis=0, num_or_sections=3) y = x1 * 2 diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index d0c6d2f9837..fd6be2ca851 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -602,14 +602,16 @@ class TestMutiTask(unittest.TestCase): INPUT_SIZE = 784 EPOCH_NUM = 2 - x = fluid.data( + x = paddle.static.data( name='x', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' ) - y = fluid.data( + y = paddle.static.data( name='y', shape=[BATCH_SIZE, INPUT_SIZE], dtype='float32' ) - switch_id = fluid.data(name='switch_id', shape=[1], dtype='int32') + switch_id = paddle.static.data( + name='switch_id', shape=[1], dtype='int32' + ) one = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=1) adam = optimizer.Adam(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py index 5faeed36dae..14565c4094a 100644 --- a/python/paddle/fluid/tests/unittests/test_channel_shuffle.py +++ b/python/paddle/fluid/tests/unittests/test_channel_shuffle.py @@ -92,10 +92,10 @@ class TestChannelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) out_1 = F.channel_shuffle(x_1, 3) @@ -127,10 +127,10 @@ class TestChannelShuffleAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=[2, 9, 4, 4], dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=[2, 4, 4, 9], dtype="float64" ) # init instance diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_op.py index 764fd79cf5d..9859e8da267 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_op.py @@ -116,7 +116,9 @@ class TestCholeskySingularAPI(unittest.TestCase): def check_static_result(self, place, with_out=False): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[4, 4], dtype="float64") + input = 
paddle.static.data( + name="input", shape=[4, 4], dtype="float64" + ) result = paddle.cholesky(input) input_np = np.zeros([4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py index 27334f12a86..47d69ff7d26 100644 --- a/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_cholesky_solve_op.py @@ -172,8 +172,8 @@ class TestCholeskySolveAPI(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[10, 2], dtype=self.dtype) - y = fluid.data(name="y", shape=[10, 10], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[10, 2], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[10, 10], dtype=self.dtype) z = paddle.linalg.cholesky_solve(x, y, upper=self.upper) x_np = np.random.random([10, 2]).astype(self.dtype) @@ -252,31 +252,31 @@ class TestCholeskySolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1) # The data type of input must be float32 or float64. - x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="float16") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x4, y4) # The number of dimensions of input'X must be >= 2. - x5 = fluid.data(name="x5", shape=[30], dtype="float64") - y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64") + x5 = paddle.static.data(name="x5", shape=[30], dtype="float64") + y5 = paddle.static.data(name="y5", shape=[30, 30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x5, y5) # The number of dimensions of input'Y must be >= 2. 
- x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30, 30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x6, y6) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises(ValueError, paddle.linalg.cholesky_solve, x7, y7) diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py index f6bc1e4f03b..6c9d5004c92 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py @@ -26,28 +26,28 @@ class TestChunkOpError(unittest.TestCase): with program_guard(Program(), Program()): # The type of axis in chunk_op should be int or Variable. def test_axis_type(): - x1 = paddle.fluid.data(shape=[4], dtype='float16', name='x3') + x1 = paddle.static.data(shape=[4], dtype='float16', name='x3') paddle.chunk(x=x1, chunks=2, axis=3.2) self.assertRaises(TypeError, test_axis_type) # The type of axis in chunk op should be int or Variable. def test_axis_variable_type(): - x2 = paddle.fluid.data(shape=[4], dtype='float16', name='x9') - x3 = paddle.fluid.data(shape=[1], dtype='float16', name='x10') + x2 = paddle.static.data(shape=[4], dtype='float16', name='x9') + x3 = paddle.static.data(shape=[1], dtype='float16', name='x10') paddle.chunk(input=x2, chunks=2, axis=x3) self.assertRaises(TypeError, test_axis_variable_type) # The type of num_or_sections in chunk_op should be int, tuple or list. 
def test_chunks_type(): - x4 = paddle.fluid.data(shape=[4], dtype='float16', name='x4') + x4 = paddle.static.data(shape=[4], dtype='float16', name='x4') paddle.chunk(input=x4, chunks=2.1, axis=3) self.assertRaises(TypeError, test_chunks_type) def test_axis_type_tensor(): - x5 = paddle.fluid.data(shape=[4], dtype='float16', name='x6') + x5 = paddle.static.data(shape=[4], dtype='float16', name='x6') paddle.chunk(input=x5, chunks=2, axis=3.2) self.assertRaises(TypeError, test_axis_type_tensor) @@ -64,8 +64,10 @@ class TestChunkOpError(unittest.TestCase): class API_TestChunk(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') - data2 = paddle.fluid.data('data2', shape=[1], dtype='int32') + data1 = paddle.static.data( + 'data1', shape=[4, 6, 6], dtype='float64' + ) + data2 = paddle.static.data('data2', shape=[1], dtype='int32') x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -83,7 +85,9 @@ class API_TestChunk(unittest.TestCase): class API_TestChunk1(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = paddle.fluid.data('data1', shape=[4, 6, 6], dtype='float64') + data1 = paddle.static.data( + 'data1', shape=[4, 6, 6], dtype='float64' + ) x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2) place = paddle.CPUPlace() exe = paddle.static.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_clip_op.py b/python/paddle/fluid/tests/unittests/test_clip_op.py index d5f5bc9f195..b8e3d150e87 100644 --- a/python/paddle/fluid/tests/unittests/test_clip_op.py +++ b/python/paddle/fluid/tests/unittests/test_clip_op.py @@ -142,9 +142,11 @@ class TestClipAPI(unittest.TestCase): paddle.enable_static() data_shape = [1, 9, 9, 4] data = np.random.random(data_shape).astype('float32') - images = fluid.data(name='image', shape=data_shape, dtype='float32') - min = fluid.data(name='min', shape=[1], dtype='float32') - max = fluid.data(name='max', shape=[1], dtype='float32') + images = paddle.static.data( + name='image', shape=data_shape, dtype='float32' + ) + min = paddle.static.data(name='min', shape=[1], dtype='float32') + max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( fluid.CUDAPlace(0) @@ -292,8 +294,8 @@ class TestClipAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name='x1', shape=[1], dtype="int16") - x2 = fluid.data(name='x2', shape=[1], dtype="int8") + x1 = paddle.static.data(name='x1', shape=[1], dtype="int16") + x2 = paddle.static.data(name='x2', shape=[1], dtype="int8") self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8) self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index b105db9fa8d..0ea2e00a716 100755 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -81,8 +81,8 @@ def create_paddle_case(op_type, callback): def test_api(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.data(name='x', shape=[4], dtype='int64') - y = fluid.data(name='y', shape=[4], dtype='int64') + x = paddle.static.data(name='x', shape=[4], dtype='int64') + y = paddle.static.data(name='y', shape=[4], dtype='int64') op = 
eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) @@ -96,8 +96,8 @@ def create_paddle_case(op_type, callback): if self.op_type == "equal": paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.data(name='x', shape=[4], dtype='int64') - y = fluid.data(name='y', shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[4], dtype='int64') + y = paddle.static.data(name='y', shape=[1], dtype='int64') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = fluid.Executor(self.place) diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 17c176af2a8..8021f6ef26e 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -340,13 +340,15 @@ class TestConcatOpError(unittest.TestCase): class TestConcatAPI(unittest.TestCase): def test_fluid_api(self): paddle.enable_static() - x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1') + x_1 = paddle.static.data( + shape=[None, 1, 4, 5], dtype='int32', name='x_1' + ) paddle.concat([x_1, x_1], 0) input_2 = np.random.random([2, 1, 4, 5]).astype("int32") input_3 = np.random.random([2, 2, 4, 5]).astype("int32") - x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') - x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') + x_2 = paddle.static.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') + x_3 = paddle.static.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) out_1 = paddle.concat([x_2, x_3], axis=1) @@ -365,15 +367,15 @@ class TestConcatAPI(unittest.TestCase): def test_api(self): paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( shape=[None, 1, 4, 5], dtype='int32', name='x_1' ) paddle.concat([x_1, x_1], 0) input_2 = np.random.random([2, 1, 4, 5]).astype("int32") input_3 = np.random.random([2, 2, 4, 5]).astype("int32") - x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') - x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') + x_2 = paddle.static.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2') + x_3 = paddle.static.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3') positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) negative_int64 = paddle.tensor.fill_constant([1], "int64", -3) @@ -420,8 +422,8 @@ class TestConcatAPI(unittest.TestCase): ) self.assertRaises(TypeError, paddle.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. - x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4') - x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5') + x4 = paddle.static.data(shape=[4], dtype='uint8', name='x4') + x5 = paddle.static.data(shape=[4], dtype='uint8', name='x5') self.assertRaises(TypeError, paddle.concat, [x4, x5]) # The type of axis in concat_op should be int or Variable. 
diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index b5fb0a50f18..14aa0b638b7 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -285,7 +285,7 @@ class TestCondInputOutput(unittest.TestCase): a = paddle.tensor.fill_constant( shape=[3, 2, 1], dtype='int32', value=7 ) - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 a = paddle.static.nn.cond( pred, lambda: true_func(a, i), lambda: false_func(a, i) @@ -330,7 +330,7 @@ class TestCondInputOutput(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 out1 = paddle.static.nn.cond(pred, true_func, false_func) out2 = paddle.static.nn.cond(pred, None, false_func) @@ -371,7 +371,7 @@ class TestCondInputOutput(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='int32') + i = paddle.static.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 with self.assertRaises(TypeError): out = paddle.static.nn.cond(pred, i, func_return_one_tensor) @@ -477,7 +477,7 @@ class TestCondNestedControlFlow(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - i = fluid.data(name="i", shape=[1], dtype='float32') + i = paddle.static.data(name="i", shape=[1], dtype='float32') i.stop_gradient = False a = 2.0 * i out = paddle.static.nn.cond( @@ -629,10 +629,14 @@ class TestCondBackward(unittest.TestCase): startup_program = Program() startup_program.random_seed = 123 with program_guard(main_program, startup_program): - img = fluid.data(name='image', shape=[-1, 9], dtype='float32') + img = paddle.static.data( + name='image', shape=[-1, 9], dtype='float32' + ) img.stop_gradient = False - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') - i = fluid.data(name="i", shape=[1], dtype='int32') + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) + i = paddle.static.data(name="i", shape=[1], dtype='int32') loss = cond_func(i, img, label) append_backward(loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -684,9 +688,13 @@ class TestCondBackward(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - img = fluid.data(name='image', shape=[-1, 784], dtype='float32') - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') - i = fluid.data(name="i", shape=[1], dtype='int32') + img = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) + i = paddle.static.data(name="i", shape=[1], dtype='int32') loss = cond_func(i, img, label) optimizer = fluid.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss) @@ -793,7 +801,7 @@ class TestCondWithError(unittest.TestCase): main_program = framework.Program() startup_program = framework.Program() with framework.program_guard(main_program, startup_program): - pred = fluid.data(name='y', shape=[1], dtype='bool') + pred = paddle.static.data(name='y', shape=[1], dtype='bool') def func(): return pred diff --git 
a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py index 441318c5129..5e3ce31f806 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_layer.py @@ -93,11 +93,13 @@ class Conv1DTestCase(unittest.TestCase): if not self.channel_last else (-1, -1, self.num_channels) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv1d( diff --git a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py index 45edd261bc6..43baf478a65 100644 --- a/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv1d_transpose_layer.py @@ -100,11 +100,13 @@ class Conv1DTransposeTestCase(unittest.TestCase): if not self.channel_last else (-1, -1, self.in_channels) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.out_channels,), dtype=self.dtype ) y_var = F.conv1d_transpose( diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py index 6c14ce054ff..3c9a18419c2 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_layer.py @@ -108,7 +108,9 @@ class Conv2DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -154,11 +156,13 @@ class Conv2DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py index 50c80c3aa32..6a7f77fdcdf 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_layer.py @@ -99,7 +99,9 @@ class Conv2DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -135,11 +137,13 @@ class Conv2DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1) ) - x_var = fluid.data("input", 
input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py index 8ef86daf69a..d3b1dd69601 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_layer.py @@ -95,7 +95,9 @@ class Conv3DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -129,11 +131,13 @@ class Conv3DTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv3d( diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py index 82c08348f4b..90ddc1ecfdb 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_layer.py @@ -97,7 +97,9 @@ class Conv3DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) weight_attr = paddle.nn.initializer.Assign(self.weight) if self.bias is None: bias_attr = False @@ -132,11 +134,13 @@ class Conv3DTransposeTestCase(unittest.TestCase): if self.channel_last else (-1, self.num_channels, -1, -1, -1) ) - x_var = fluid.data("input", input_shape, dtype=self.dtype) - w_var = fluid.data( + x_var = paddle.static.data( + "input", input_shape, dtype=self.dtype + ) + w_var = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) - b_var = fluid.data( + b_var = paddle.static.data( "bias", (self.num_filters,), dtype=self.dtype ) y_var = F.conv3d_transpose( diff --git a/python/paddle/fluid/tests/unittests/test_corr.py b/python/paddle/fluid/tests/unittests/test_corr.py index eb3eacddedf..ce4fea47c75 100644 --- a/python/paddle/fluid/tests/unittests/test_corr.py +++ b/python/paddle/fluid/tests/unittests/test_corr.py @@ -117,7 +117,7 @@ class Corr_Comeplex_Test(unittest.TestCase): def test_errors(self): paddle.enable_static() - x1 = fluid.data(name=self.dtype, shape=[2], dtype=self.dtype) + x1 = paddle.static.data(name=self.dtype, shape=[2], dtype=self.dtype) self.assertRaises(TypeError, paddle.linalg.corrcoef, x=x1) paddle.disable_static() diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py index 0626066c34a..7bbe44c1e80 100644 --- a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py +++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py @@ 
-48,8 +48,8 @@ class TestCosineSimilarityAPI(unittest.TestCase): np_x1 = np.random.rand(*shape).astype(np.float32) np_x2 = np.random.rand(*shape).astype(np.float32) - x1 = paddle.fluid.data(name="x1", shape=shape) - x2 = paddle.fluid.data(name="x2", shape=shape) + x1 = paddle.static.data(name="x1", shape=shape) + x2 = paddle.static.data(name="x2", shape=shape) result = F.cosine_similarity(x1, x2, axis=axis, eps=eps) exe = Executor(place) fetches = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py index 516b0744598..4e94a7e8139 100644 --- a/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py +++ b/python/paddle/fluid/tests/unittests/test_count_nonzero_api.py @@ -37,7 +37,7 @@ class TestCountNonzeroAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_shape) + x = paddle.static.data('X', self.x_shape) out1 = paddle.count_nonzero(x) out2 = paddle.tensor.count_nonzero(x) out3 = paddle.tensor.math.count_nonzero(x) @@ -80,7 +80,7 @@ class TestCountNonzeroAPI(unittest.TestCase): def test_errors(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], 'int32') + x = paddle.static.data('X', [10, 12], 'int32') self.assertRaises(ValueError, paddle.count_nonzero, x, axis=10) diff --git a/python/paddle/fluid/tests/unittests/test_crop_op.py b/python/paddle/fluid/tests/unittests/test_crop_op.py index 26ecda5fb68..394b4a1b688 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_op.py @@ -18,7 +18,6 @@ import numpy as np from eager_op_test import OpTest import paddle -import paddle.fluid as fluid def crop(data, offsets, crop_shape): @@ -136,7 +135,7 @@ class TestCase6(TestCropOp): class TestCropNoneOffset(unittest.TestCase): def test_crop_none_offset(self): - x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") + x = paddle.static.data(name="input1", shape=[3, 6, 6], dtype="float32") crop_shape = [2, 2, 2] crop = paddle.crop(x, crop_shape, None) self.assertEqual(crop.shape, (2, 2, 2)) @@ -144,7 +143,7 @@ class TestCropNoneOffset(unittest.TestCase): class TestCropNoneShape(unittest.TestCase): def test_crop_none_shape(self): - x = fluid.data(name="input1", shape=[3, 6, 6], dtype="float32") + x = paddle.static.data(name="input1", shape=[3, 6, 6], dtype="float32") crop = paddle.crop(x) self.assertEqual(crop.shape, (3, 6, 6)) @@ -152,7 +151,7 @@ class TestCropNoneShape(unittest.TestCase): class TestCropError(unittest.TestCase): def test_neg_offset_error(self): with self.assertRaises(ValueError): - x = fluid.data(name='input2', shape=[1], dtype="float32") + x = paddle.static.data(name='input2', shape=[1], dtype="float32") out = paddle.crop(x, offsets=[-1]) diff --git a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py index ec1028fba44..31141142ccf 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_tensor_op.py @@ -18,7 +18,6 @@ import numpy as np from eager_op_test import OpTest import paddle -import paddle.fluid as fluid def crop(data, offsets, crop_shape): @@ -227,10 +226,14 @@ class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr): class TestCropTensorException(unittest.TestCase): def 
test_exception(self): - input1 = fluid.data(name="input1", shape=[2, 3, 6, 6], dtype="float32") - input2 = fluid.data(name="input2", shape=[2, 3, 6, 6], dtype="float16") - dim = fluid.data(name='dim', shape=[1], dtype='int32') - offset = fluid.data(name='offset', shape=[1], dtype='int32') + input1 = paddle.static.data( + name="input1", shape=[2, 3, 6, 6], dtype="float32" + ) + input2 = paddle.static.data( + name="input2", shape=[2, 3, 6, 6], dtype="float16" + ) + dim = paddle.static.data(name='dim', shape=[1], dtype='int32') + offset = paddle.static.data(name='offset', shape=[1], dtype='int32') def attr_shape_type(): out = paddle.crop(input1, shape=3) diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py index 65bcd38439d..f172ce7c9e9 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py @@ -303,10 +303,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) @@ -399,13 +399,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -488,10 +490,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) @@ -572,13 +574,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.C], dtype=self.dtype ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -671,12 +675,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, @@ -769,17 +773,19 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], 
dtype=self.dtype, ) - label = fluid.data( + label = paddle.static.data( name='label', shape=[self.N, self.H, self.W, self.C], dtype=self.dtype, ) - weight = fluid.data(name='weight', shape=[self.C], dtype=self.dtype) + weight = paddle.static.data( + name='weight', shape=[self.C], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction=self.reduction, soft_label=True @@ -816,8 +822,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) - label = fluid.data(name='label', shape=[2], dtype='int64') + input = paddle.static.data( + name='input', shape=[2, 4], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[2], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(ignore_index=0) ret = cross_entropy_loss(input, label) @@ -862,8 +870,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) - label = fluid.data(name='label', shape=[N], dtype='int64') + input = paddle.static.data( + name='input', shape=[N, C], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[N], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( ignore_index=-1 ) @@ -910,9 +920,11 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[N, C], dtype=self.dtype) - label = fluid.data(name='label', shape=[N], dtype='int64') - weight = fluid.data( + input = paddle.static.data( + name='input', shape=[N, C], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[N], dtype='int64') + weight = paddle.static.data( name='weight', shape=[C], dtype=self.dtype ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( @@ -989,9 +1001,11 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[2, 4], dtype=self.dtype) - label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data( + input = paddle.static.data( + name='input', shape=[2, 4], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[2], dtype='int64') + weight = paddle.static.data( name='weight', shape=[4], dtype=self.dtype ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight) @@ -1042,9 +1056,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='sum' ) @@ -1092,9 +1110,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = 
fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='none' @@ -1144,9 +1166,13 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') - weight = fluid.data(name='weight', shape=[200], dtype=self.dtype) + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') + weight = paddle.static.data( + name='weight', shape=[200], dtype=self.dtype + ) ret = paddle.nn.functional.cross_entropy( input, label, weight=weight, reduction='none' ) @@ -1192,8 +1218,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() ret = cross_entropy_loss(input, label) exe = fluid.Executor(place) @@ -1228,8 +1256,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) @@ -1268,8 +1298,10 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype=self.dtype) - label = fluid.data(name='label', shape=[100], dtype='int64') + input = paddle.static.data( + name='input', shape=[100, 200], dtype=self.dtype + ) + label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) @@ -1316,11 +1348,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='none' ) @@ -1374,11 +1410,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 3, 2, 2], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = 
fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='mean', axis=1 ) @@ -1460,11 +1500,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='mean' ) @@ -1516,11 +1560,15 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') - weight = fluid.data(name='weight', shape=[3], dtype=self.dtype) + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) + weight = paddle.static.data( + name='weight', shape=[3], dtype=self.dtype + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( weight=weight, reduction='sum' ) @@ -1570,10 +1618,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) @@ -1621,10 +1671,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='mean' ) @@ -1673,10 +1725,12 @@ class CrossEntropyLoss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) - label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64') + label = paddle.static.data( + name='label', shape=[2, 2, 2], dtype='int64' + ) cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) @@ -1772,11 +1826,13 @@ class TestCrossEntropyFAPIError(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data( + input = paddle.static.data( name='input', shape=[2, 4], dtype='float32' ) - label = fluid.data(name='label', shape=[2], dtype='int64') - weight = fluid.data( + label = paddle.static.data( + name='label', shape=[2], dtype='int64' + ) + weight = paddle.static.data( name='weight', shape=[3], dtype='float32' ) # weight for each class cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( diff --git 
a/python/paddle/fluid/tests/unittests/test_cross_op.py b/python/paddle/fluid/tests/unittests/test_cross_op.py index a886c2e27be..5ff34a337c4 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_op.py @@ -123,8 +123,8 @@ class TestCrossAPI(unittest.TestCase): # case 3: with program_guard(Program(), Program()): - x = fluid.data(name="x", shape=[-1, 3], dtype="float32") - y = fluid.data(name='y', shape=[-1, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[-1, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32') y_1 = paddle.cross(x, y, name='result') self.assertEqual(('result' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_cumprod_op.py b/python/paddle/fluid/tests/unittests/test_cumprod_op.py index 2a97e8af44d..544b075ce0d 100644 --- a/python/paddle/fluid/tests/unittests/test_cumprod_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumprod_op.py @@ -166,7 +166,7 @@ class TestCumprodAPI(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape, dtype=self.dtype) + x = paddle.static.data('X', self.shape, dtype=self.dtype) out = paddle.cumprod(x, -2) exe = paddle.static.Executor(place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_data.py b/python/paddle/fluid/tests/unittests/test_data.py index 0e2223767dd..9bbf16fd618 100644 --- a/python/paddle/fluid/tests/unittests/test_data.py +++ b/python/paddle/fluid/tests/unittests/test_data.py @@ -15,30 +15,12 @@ import unittest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard -class TestApiDataError(unittest.TestCase): - def test_fluid_data(self): - with program_guard(Program(), Program()): - - # 1. The type of 'name' in fluid.data must be str. - def test_name_type(): - fluid.data(name=1, shape=[2, 25], dtype="bool") - - self.assertRaises(TypeError, test_name_type) - - # 2. The type of 'shape' in fluid.data must be list or tuple. - def test_shape_type(): - fluid.data(name='data1', shape=2, dtype="bool") - - self.assertRaises(TypeError, test_shape_type) - - class TestApiStaticDataError(unittest.TestCase): - def test_fluid_dtype(self): + def test_dtype(self): with program_guard(Program(), Program()): x1 = paddle.static.data(name="x1", shape=[2, 25]) self.assertEqual(x1.dtype, core.VarDesc.VarType.FP32) @@ -50,16 +32,16 @@ class TestApiStaticDataError(unittest.TestCase): x3 = paddle.static.data(name="x3", shape=[2, 25]) self.assertEqual(x3.dtype, core.VarDesc.VarType.FP64) - def test_fluid_data(self): + def test_error(self): with program_guard(Program(), Program()): - # 1. The type of 'name' in fluid.data must be str. + # 1. The type of 'name' in paddle.static.data must be str. def test_name_type(): paddle.static.data(name=1, shape=[2, 25], dtype="bool") self.assertRaises(TypeError, test_name_type) - # 2. The type of 'shape' in fluid.data must be list or tuple. + # 2. The type of 'shape' in paddle.static.data must be list or tuple. 
def test_shape_type(): paddle.static.data(name='data1', shape=2, dtype="bool") @@ -84,10 +66,7 @@ class TestApiErrorWithDynamicMode(unittest.TestCase): def test_error(self): with program_guard(Program(), Program()): paddle.disable_static() - self.assertRaises(AssertionError, fluid.data, 'a', [2, 25]) - self.assertRaises( - AssertionError, paddle.static.data, 'c', shape=[2, 25] - ) + self.assertRaises(AssertionError, paddle.static.data, 'a', [2, 25]) paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index 4cf038334f7..8fcd4d2af9b 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -542,7 +542,7 @@ class TestDataNormOpErrorr(unittest.TestCase): # The size of input in data_norm should not be 0. def test_0_size(): paddle.enable_static() - x = fluid.data(name='x', shape=[0, 3], dtype='float32') + x = paddle.static.data(name='x', shape=[0, 3], dtype='float32') out = paddle.static.nn.data_norm(x, slot_dim=1) cpu = fluid.core.CPUPlace() exe = fluid.Executor(cpu) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py index f84eb481472..504d37d42d9 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_early_reset.py @@ -46,7 +46,7 @@ class TestDataLoaderEarlyReset(unittest.TestCase): return fluid.CPUPlace() def create_data_loader(self): - self.x = fluid.data(name='x', shape=[None, 32], dtype='float32') + self.x = paddle.static.data(name='x', shape=[None, 32], dtype='float32') return fluid.io.DataLoader.from_generator( feed_list=[self.x], capacity=10, iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py index 70e90cd8eb5..66085a176a2 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_keep_order.py @@ -43,7 +43,9 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): self.initParameters() def build_network(self, places): - input_data = fluid.data(shape=self.shape, dtype='float32', name="input") + input_data = paddle.static.data( + shape=self.shape, dtype='float32', name="input" + ) loader = fluid.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py index 9538e1895cf..9d0d63a2e88 100644 --- a/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py +++ b/python/paddle/fluid/tests/unittests/test_dataloader_unkeep_order.py @@ -49,7 +49,9 @@ class DataLoaderKeepOrderTestBase(unittest.TestCase): self.visited = set() def build_network(self, places): - input_data = fluid.data(shape=self.shape, dtype='float32', name="input") + input_data = paddle.static.data( + shape=self.shape, dtype='float32', name="input" + ) loader = fluid.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index d04cb0965b3..dddc91e2b88 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ 
b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -744,7 +744,7 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.data( + var = paddle.static.data( name=slot, shape=[None, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) diff --git a/python/paddle/fluid/tests/unittests/test_deg2rad.py b/python/paddle/fluid/tests/unittests/test_deg2rad.py index 5d6d9ac646b..9bede17461d 100644 --- a/python/paddle/fluid/tests/unittests/test_deg2rad.py +++ b/python/paddle/fluid/tests/unittests/test_deg2rad.py @@ -36,7 +36,9 @@ class TestDeg2radAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape) + x = paddle.static.data( + name='input', dtype=self.x_dtype, shape=self.x_shape + ) out = paddle.deg2rad(x) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py index cb417eac442..63c091d1b0c 100755 --- a/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_deprecated_decorator.py @@ -19,7 +19,6 @@ import warnings import numpy as np import paddle -import paddle.fluid as fluid import paddle.utils.deprecated as deprecated from paddle import _legacy_C_ops @@ -64,30 +63,10 @@ def get_warning_index(api): class TestDeprecatedDocorator(unittest.TestCase): """ tests for paddle's Deprecated Docorator. - test_fluid_data: test for old fluid.data API. test_new_multiply: test for new api, which should not insert warning information. test_ops_elementwise_mul: test for C++ elementwise_mul op, which should not insert warning information. """ - def test_fluid_data(self): - """ - test old fluid elementwise_mul api, it should fire Warinng function, - which insert the Warinng info on top of API's doc string. - """ - paddle.enable_static() - # Initialization - x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32') - - # expected - expected = LOWEST_WARNING_POSTION - - # captured - captured = get_warning_index(fluid.data) - paddle.disable_static() - - # testting - self.assertGreater(expected, captured) - def test_new_multiply(self): """ Test for new multiply api, expected result should be False. 
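The test deleted above existed only to check where the deprecation warning was injected into fluid.data's docstring; with the API removed, the test goes with it. Most other hunks in this patch apply the same mechanical rewrite: a fluid.data(...) or paddle.fluid.data(...) placeholder becomes paddle.static.data(...) with the same name/shape/dtype arguments. Below is a minimal sketch of the replacement pattern, not taken from any file in this patch; the placeholder name 'x' and the doubling op are illustrative only, and it assumes static graph mode:

    import numpy as np
    import paddle

    paddle.enable_static()  # paddle.static.data is only valid in static graph mode

    # old, removed:  x = fluid.data(name='x', shape=[None, 3], dtype='float32')
    # new:           x = paddle.static.data(name='x', shape=[None, 3], dtype='float32')
    x = paddle.static.data(name='x', shape=[None, 3], dtype='float32')
    y = x * 2.0  # any static-graph op would do here

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    (out,) = exe.run(
        paddle.static.default_main_program(),
        feed={'x': np.random.rand(4, 3).astype('float32')},
        fetch_list=[y],
    )

As the test_data.py hunk earlier in this section asserts, paddle.static.data defaults dtype to float32 when it is omitted, so most call sites migrate without any argument changes.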
diff --git a/python/paddle/fluid/tests/unittests/test_determinant_op.py b/python/paddle/fluid/tests/unittests/test_determinant_op.py index 332a9f010a6..ade000cda87 100644 --- a/python/paddle/fluid/tests/unittests/test_determinant_op.py +++ b/python/paddle/fluid/tests/unittests/test_determinant_op.py @@ -69,7 +69,7 @@ class TestDeterminantAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.linalg.det(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -126,7 +126,7 @@ class TestSlogDeterminantAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.linalg.slogdet(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_diag_embed.py b/python/paddle/fluid/tests/unittests/test_diag_embed.py index 96241df0691..0c75197fd28 100644 --- a/python/paddle/fluid/tests/unittests/test_diag_embed.py +++ b/python/paddle/fluid/tests/unittests/test_diag_embed.py @@ -17,6 +17,7 @@ import unittest import numpy as np from eager_op_test import OpTest, paddle_static_guard +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.nn.functional as F @@ -53,7 +54,9 @@ class TestDiagEmbedAPICase(unittest.TestCase): def test_case1(self): with paddle_static_guard(): diag_embed = np.random.randn(2, 3, 4).astype('float32') - data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32') + data1 = paddle.static.data( + name='data1', shape=[2, 3, 4], dtype='float32' + ) out1 = F.diag_embed(data1) out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3) diff --git a/python/paddle/fluid/tests/unittests/test_diagonal_op.py b/python/paddle/fluid/tests/unittests/test_diagonal_op.py index 9e56e3c9c75..1cff06a1808 100644 --- a/python/paddle/fluid/tests/unittests/test_diagonal_op.py +++ b/python/paddle/fluid/tests/unittests/test_diagonal_op.py @@ -137,7 +137,7 @@ class TestDiagonalAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) + x = paddle.static.data('X', self.shape) out = paddle.diagonal(x) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) diff --git a/python/paddle/fluid/tests/unittests/test_diff_op.py b/python/paddle/fluid/tests/unittests/test_diff_op.py index 1c1a8639ee2..7cbeee96eae 100644 --- a/python/paddle/fluid/tests/unittests/test_diff_op.py +++ b/python/paddle/fluid/tests/unittests/test_diff_op.py @@ -84,7 +84,7 @@ class TestDiffOp(unittest.TestCase): places.append(fluid.CUDAPlace(0)) for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): - x = paddle.fluid.data( + x = paddle.static.data( name="input", shape=self.input.shape, dtype=self.input.dtype ) has_pend = False @@ -92,14 +92,14 @@ class TestDiffOp(unittest.TestCase): append = None if self.prepend is not None: has_pend = True - prepend = paddle.fluid.data( + prepend = paddle.static.data( name="prepend", shape=self.prepend.shape, dtype=self.prepend.dtype, ) if self.append is not None: has_pend = True - append = paddle.fluid.data( + append = 
paddle.static.data( name="append", shape=self.append.shape, dtype=self.append.dtype, diff --git a/python/paddle/fluid/tests/unittests/test_dist_op.py b/python/paddle/fluid/tests/unittests/test_dist_op.py index 988665d4ef2..88d6313d980 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_op.py +++ b/python/paddle/fluid/tests/unittests/test_dist_op.py @@ -169,8 +169,12 @@ class TestDistAPI(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - x = fluid.data(name='x', shape=[2, 3, 4, 5], dtype=self.data_type) - y = fluid.data(name='y', shape=[3, 1, 5], dtype=self.data_type) + x = paddle.static.data( + name='x', shape=[2, 3, 4, 5], dtype=self.data_type + ) + y = paddle.static.data( + name='y', shape=[3, 1, 5], dtype=self.data_type + ) p = 2 x_i = np.random.random((2, 3, 4, 5)).astype(self.data_type) y_i = np.random.random((3, 1, 5)).astype(self.data_type) diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py index 5ab7ad21dbd..44ee15ea1a2 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py @@ -30,7 +30,9 @@ class SparseLoadOp(unittest.TestCase): def net(self, emb_array, fc_array): with fluid.unique_name.guard(): - dense_input = fluid.data('input', shape=[None, 1], dtype="int64") + dense_input = paddle.static.data( + 'input', shape=[None, 1], dtype="int64" + ) emb = fluid.layers.embedding( input=dense_input, diff --git a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py index f2f526484ce..e29d31270c5 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py +++ b/python/paddle/fluid/tests/unittests/test_dist_sparse_tensor_load_sgd.py @@ -46,7 +46,9 @@ class TestSparseLoadProgram(unittest.TestCase): with fluid.scope_guard(scope): with fluid.program_guard(train_program, startup_program): with fluid.unique_name.guard(): - inputs = fluid.data('input', shape=[None, 1], dtype="int64") + inputs = paddle.static.data( + 'input', shape=[None, 1], dtype="int64" + ) emb = fluid.layers.embedding( inputs, is_sparse=True, size=[10000, 128] ) diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index 306c1bd3f27..ecdc8934016 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -399,7 +399,9 @@ class TestDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[-1, -1], dtype="float32") + input = paddle.static.data( + name="input", shape=[-1, -1], dtype="float32" + ) res1 = paddle.nn.functional.dropout(x=input, p=0.0, training=False) res2 = paddle.nn.functional.dropout( x=input, p=0.0, axis=0, training=True, mode='upscale_in_train' @@ -632,56 +634,72 @@ class TestDropoutFAPIError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float32 or float64 # float16 only can be set on GPU place - xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32") + xr = paddle.static.data( + name='xr', shape=[3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout(xr, p=0.5) self.assertRaises(TypeError, test_dtype) def test_pdtype(): # p 
should be int or float - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, p='0.5') self.assertRaises(TypeError, test_pdtype) def test_pvalue(): # p should be 0.<=p<=1. - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, p=1.2) self.assertRaises(ValueError, test_pvalue) def test_mode(): # mode should be 'downscale_in_infer' or 'upscale_in_train' - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, mode='abc') self.assertRaises(ValueError, test_mode) def test_axis(): # axis should be int or list - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=1.2) self.assertRaises(TypeError, test_axis) def test_axis_max(): # maximum of axis should less than dimensions of x - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, 5]) self.assertRaises(ValueError, test_axis_max) def test_axis_min(): # minimum of axis should greater equal than 0 - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, -1]) self.assertRaises(ValueError, test_axis_min) def test_axis_len(): # length of axis should not greater than dimensions of x - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.dropout(x2, axis=[0, 1, 2, 3, 4]) self.assertRaises(ValueError, test_axis_len) @@ -717,7 +735,7 @@ class TestDropout2DFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) res1 = paddle.nn.functional.dropout2d( @@ -769,14 +787,18 @@ class TestDropout2DFAPIError(unittest.TestCase): def test_xdim(): # dimentions of x should be 4 - x = fluid.data(name='x1', shape=[2, 3, 4, 5, 6], dtype="int32") + x = paddle.static.data( + name='x1', shape=[2, 3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout2d(x) self.assertRaises(ValueError, test_xdim) def test_dataformat(): # data_format should be 'NCHW' or 'NHWC' - x = fluid.data(name='x2', shape=[2, 3, 4, 5], dtype="int32") + x = paddle.static.data( + name='x2', shape=[2, 3, 4, 5], dtype="int32" + ) paddle.nn.functional.dropout2d(x, data_format='CNHW') self.assertRaises(ValueError, test_dataformat) @@ -835,7 +857,7 @@ class TestDropout3DFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5, 6], dtype="float32" ) res1 = paddle.nn.functional.dropout3d( @@ -887,14 +909,18 @@ class TestDropout3DFAPIError(unittest.TestCase): def test_xdim(): # dimentions of x should be 5 - x = fluid.data(name='x1', shape=[2, 3, 4, 5], dtype="int32") + x = paddle.static.data( + name='x1', shape=[2, 3, 4, 5], 
dtype="int32" + ) paddle.nn.functional.dropout3d(x) self.assertRaises(ValueError, test_xdim) def test_dataformat(): # data_format should be 'NCDHW' or 'NDHWC' - x = fluid.data(name='x2', shape=[2, 3, 4, 5, 6], dtype="int32") + x = paddle.static.data( + name='x2', shape=[2, 3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.dropout3d(x, data_format='CNDHW') self.assertRaises(ValueError, test_dataformat) @@ -930,7 +956,9 @@ class TestAlphaDropoutFAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[40, 40], dtype="float32") + input = paddle.static.data( + name="input", shape=[40, 40], dtype="float32" + ) res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0) res2 = paddle.nn.functional.alpha_dropout( x=input, p=0.0, training=False @@ -996,21 +1024,27 @@ class TestAlphaDropoutFAPIError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float32 or float64 - xr = fluid.data(name='xr', shape=[3, 4, 5, 6], dtype="int32") + xr = paddle.static.data( + name='xr', shape=[3, 4, 5, 6], dtype="int32" + ) paddle.nn.functional.alpha_dropout(xr) self.assertRaises(TypeError, test_dtype) def test_pdtype(): # p should be int or float - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.alpha_dropout(x2, p='0.5') self.assertRaises(TypeError, test_pdtype) def test_pvalue(): # p should be 0.<=p<=1. - x2 = fluid.data(name='x2', shape=[3, 4, 5, 6], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[3, 4, 5, 6], dtype="float32" + ) paddle.nn.functional.alpha_dropout(x2, p=1.2) self.assertRaises(ValueError, test_pvalue) diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index 9ea90d152da..d923ece5726 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -31,7 +31,9 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): x = paddle.assign( np.random.rand(batch_size, beam_size, 32).astype("float32") ) - indices = fluid.data(shape=[None, beam_size], dtype="int64", name="indices") + indices = paddle.static.data( + shape=[None, beam_size], dtype="int64", name="indices" + ) step_idx = paddle.tensor.fill_constant( shape=[1], dtype="int64", value=0, force_cpu=True ) diff --git a/python/paddle/fluid/tests/unittests/test_eig_op.py b/python/paddle/fluid/tests/unittests/test_eig_op.py index 2860e078e74..5e412908e61 100644 --- a/python/paddle/fluid/tests/unittests/test_eig_op.py +++ b/python/paddle/fluid/tests/unittests/test_eig_op.py @@ -244,7 +244,9 @@ class TestEigStatic(TestEigOp): input_np = np.random.random([3, 3]).astype('complex') expect_val, expect_vec = np.linalg.eig(input_np) with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 3], dtype='complex') + input = paddle.static.data( + name="input", shape=[3, 3], dtype='complex' + ) act_val, act_vec = paddle.linalg.eig(input) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index c742a02b9e1..cff2c505cba 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -570,8 +570,8 @@ class TestAddApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='add_res') self.assertEqual(('add_res' in y_1.name), True) @@ -585,8 +585,8 @@ class TestAddApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index 2138ac33a77..af06373ea0a 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -485,7 +485,7 @@ create_test_fp16_class(TestElementwiseDivOpXsizeLessThanYsize) class TestElementwiseDivBroadcast(unittest.TestCase): def test_shape_with_batch_sizes(self): with fluid.program_guard(fluid.Program()): - x_var = fluid.data( + x_var = paddle.static.data( name='x', dtype='float32', shape=[None, 3, None, None] ) one = 2.0 @@ -499,8 +499,8 @@ class TestElementwiseDivBroadcast(unittest.TestCase): class TestDivideOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = paddle.divide(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py index 29402bcf49a..7aadfbd01e8 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py @@ -99,8 +99,8 @@ class TestFloorDivideOp(unittest.TestCase): def test_name(self): with paddle_static_guard(): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = paddle.floor_divide(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py index 0264dd8a54b..d61b2bcce1f 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py @@ -133,8 +133,8 @@ class TestRemainderOp(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="int64") - y = fluid.data(name='y', shape=[2, 3], dtype='int64') + x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") + y = 
paddle.static.data(name='y', shape=[2, 3], dtype='int64') y_1 = self._executed_api(x, y, name='div_res') self.assertEqual(('div_res' in y_1.name), True) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py index 89083641abc..a076f2d02eb 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py @@ -472,8 +472,8 @@ class TestSubtractApi(unittest.TestCase): def test_name(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') + x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') y_1 = self._executed_api(x, y, name='subtract_res') self.assertEqual(('subtract_res' in y_1.name), True) @@ -487,8 +487,8 @@ class TestSubtractApi(unittest.TestCase): "y": np.array([1, 5, 2]).astype('float32'), } - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') + x = paddle.static.data(name="x", shape=[3], dtype='float32') + y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_ema.py b/python/paddle/fluid/tests/unittests/test_ema.py index 117acb13249..62100c31452 100644 --- a/python/paddle/fluid/tests/unittests/test_ema.py +++ b/python/paddle/fluid/tests/unittests/test_ema.py @@ -32,7 +32,9 @@ class TestExponentialMovingAverage(unittest.TestCase): self._startup_prog = fluid.Program() with fluid.program_guard(self._train_program, self._startup_prog): with fluid.unique_name.guard(): - data = fluid.data(name='x', shape=[-1, 5], dtype='float32') + data = paddle.static.data( + name='x', shape=[-1, 5], dtype='float32' + ) hidden = paddle.static.nn.fc( x=data, size=10, weight_attr=self._param_name ) diff --git a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py index 7c5776cb4ab..4f709367c18 100644 --- a/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_embedding_id_stop_gradient.py @@ -49,8 +49,8 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase): scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): with fluid.scope_guard(scope): - x_1 = fluid.data(name='x1', shape=[4, 1], dtype='int64') - x_2 = fluid.data(name='x2', shape=[4, 1], dtype='int64') + x_1 = paddle.static.data(name='x1', shape=[4, 1], dtype='int64') + x_2 = paddle.static.data(name='x2', shape=[4, 1], dtype='int64') x = paddle.concat([x_1, x_2], axis=-1) for _ in range(self.reshape_times): diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index d13b7f53d36..104495887e0 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -242,13 +242,13 @@ class TestEmptyAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 3) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 3) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + 
shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) - shape_tensor_unknown = fluid.data( + shape_tensor_unknown = paddle.static.data( name="shape_tensor_unknown", shape=[-1], dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/test_erfinv_op.py b/python/paddle/fluid/tests/unittests/test_erfinv_op.py index 695813bb247..9f6d9ca01df 100644 --- a/python/paddle/fluid/tests/unittests/test_erfinv_op.py +++ b/python/paddle/fluid/tests/unittests/test_erfinv_op.py @@ -77,7 +77,7 @@ class TestErfinvAPI(unittest.TestCase): def run(place): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('x', [1, 5], dtype=self.dtype) + x = paddle.static.data('x', [1, 5], dtype=self.dtype) out = paddle.erfinv(x) exe = paddle.static.Executor(place) res = exe.run(feed={'x': self.x.reshape([1, 5])}) diff --git a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py index 700bbfa95d1..6f2408b376f 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py +++ b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py @@ -20,9 +20,9 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): def net(self): - lr = fluid.data(name="lr", shape=[1], dtype='float32') - x = fluid.data(name="x", shape=[None, 1], dtype='float32') - y = fluid.data(name="y", shape=[None, 1], dtype='float32') + lr = paddle.static.data(name="lr", shape=[1], dtype='float32') + x = paddle.static.data(name="x", shape=[None, 1], dtype='float32') + y = paddle.static.data(name="y", shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py index acdb8b78549..b3b6880bcd1 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_executor_feed_non_tensor.py @@ -22,9 +22,9 @@ import paddle.fluid as fluid class TestExecutor(unittest.TestCase): def net(self): - lr = fluid.data(name="lr", shape=[1], dtype='float32') - x = fluid.data(name="x", shape=[None, 1], dtype='float32') - y = fluid.data(name="y", shape=[None, 1], dtype='float32') + lr = paddle.static.data(name="lr", shape=[1], dtype='float32') + x = paddle.static.data(name="x", shape=[None, 1], dtype='float32') + y = paddle.static.data(name="y", shape=[None, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py index 8828b8b0e81..0298c0ebc20 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py +++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py @@ -28,8 +28,8 @@ np.random.seed(123) class TestFeedData(unittest.TestCase): ''' - Test paddle.fluid.data feeds with different shape and types. - Note: paddle.fluid.data is not paddle.static.data. + Test paddle.static.data feeds with different shapes and types. + Note: paddle.static.data replaces the removed paddle.fluid.data. 
''' def setUp(self): @@ -53,8 +53,12 @@ class TestFeedData(unittest.TestCase): return self.data_batch_size def _simple_fc_net(self, in_size, label_size, class_num, hidden_sizes): - in_data = fluid.data(name="data", dtype='float32', shape=in_size) - label = fluid.data(name='label', dtype='int64', shape=label_size) + in_data = paddle.static.data( + name="data", dtype='float32', shape=in_size + ) + label = paddle.static.data( + name='label', dtype='int64', shape=label_size + ) hidden = in_data for hidden_size in hidden_sizes: diff --git a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py index c20f4ebcc24..4f1c26dfc4b 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py @@ -27,8 +27,12 @@ class TestFetchLoDTensorArray(unittest.TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main_program, startup_program): i = layers.zeros(shape=[1], dtype='int64') - img = fluid.data(name='image', shape=[-1, 784], dtype='float32') - label = fluid.data(name='label', shape=[-1, 1], dtype='int64') + img = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) loss = simple_fc_net_with_inputs(img, label, class_num=10) loss = simple_fc_net() opt = fluid.optimizer.SGD(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 56a703cf08c..4cd77d8968b 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -285,10 +285,10 @@ class TestFillConstantAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -454,7 +454,7 @@ class TestFillConstantOpError(unittest.TestCase): # The shape dtype of fill_constant_op must be int32 or int64. 
def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.tensor.fill_constant( @@ -464,7 +464,7 @@ class TestFillConstantOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.tensor.fill_constant( diff --git a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py index e34b98747f7..63c49f40123 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_pyramid_hash.py @@ -30,7 +30,9 @@ class TestPyramidHashOpApi(unittest.TestCase): num_voc = 128 embed_dim = 64 x_shape, x_lod = [16, 10], [[3, 5, 2, 6]] - x = fluid.data(name='x', shape=x_shape, dtype='int32', lod_level=1) + x = paddle.static.data( + name='x', shape=x_shape, dtype='int32', lod_level=1 + ) hash_embd = fluid.contrib.layers.search_pyramid_hash( input=x, num_emb=embed_dim, diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py index 687132a11a1..6ea01f51bf9 100644 --- a/python/paddle/fluid/tests/unittests/test_flip.py +++ b/python/paddle/fluid/tests/unittests/test_flip.py @@ -32,7 +32,9 @@ class TestFlipOp_API(unittest.TestCase): train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): axis = [0] - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis) output = paddle.flip(output, -1) output = output.flip(0) @@ -201,13 +203,17 @@ class TestFlipError(unittest.TestCase): paddle.enable_static() def test_axis_rank(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis=[[0]]) self.assertRaises(TypeError, test_axis_rank) def test_axis_rank2(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.flip(input, axis=[[0, 0], [1, 1]]) self.assertRaises(TypeError, test_axis_rank2) diff --git a/python/paddle/fluid/tests/unittests/test_frac_api.py b/python/paddle/fluid/tests/unittests/test_frac_api.py index a8395e5d458..c9c1feec992 100644 --- a/python/paddle/fluid/tests/unittests/test_frac_api.py +++ b/python/paddle/fluid/tests/unittests/test_frac_api.py @@ -44,7 +44,7 @@ class TestFracAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with program_guard(Program()): - input = fluid.data('X', self.x_np.shape, self.x_np.dtype) + input = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.frac(input) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -105,7 +105,7 @@ class TestFracError(unittest.TestCase): def test_static_error(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5, 5], 'bool') + x = paddle.static.data('X', [5, 5], 'bool') self.assertRaises(TypeError, paddle.frac, x) def test_dygraph_error(self): diff --git a/python/paddle/fluid/tests/unittests/test_frexp_api.py b/python/paddle/fluid/tests/unittests/test_frexp_api.py index 230afc993ae..f14216ef718 100644 --- 
a/python/paddle/fluid/tests/unittests/test_frexp_api.py +++ b/python/paddle/fluid/tests/unittests/test_frexp_api.py @@ -39,7 +39,7 @@ class TestFrexpAPI(unittest.TestCase): # enable static graph mode paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - input_data = paddle.fluid.data( + input_data = paddle.static.data( 'X', self.x_np.shape, self.x_np.dtype ) out = paddle.frexp(input_data) diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index 3a30c070338..5c2dc0903cb 100644 --- a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -40,7 +40,7 @@ class TestFullOp(unittest.TestCase): train_program = Program() with program_guard(train_program, startup_program): fill_value = 2.0 - input = paddle.fluid.data( + input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.full_like(input, fill_value) @@ -88,7 +88,7 @@ class TestFullOpError(unittest.TestCase): with program_guard(Program(), Program()): # for ci coverage - input_data = paddle.fluid.data( + input_data = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.full_like(input_data, 2.0) diff --git a/python/paddle/fluid/tests/unittests/test_full_op.py b/python/paddle/fluid/tests/unittests/test_full_op.py index 670cf2acb76..0040b4ed289 100644 --- a/python/paddle/fluid/tests/unittests/test_full_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_op.py @@ -27,11 +27,11 @@ class TestFullAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) @@ -167,7 +167,7 @@ class TestFullOpError(unittest.TestCase): # The shape dtype of full op must be int32 or int64. 
def test_shape_tensor_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor", shape=[2], dtype="float32" ) paddle.full(shape=shape, dtype="float32", fill_value=1) @@ -175,7 +175,7 @@ class TestFullOpError(unittest.TestCase): self.assertRaises(TypeError, test_shape_tensor_dtype) def test_shape_tensor_list_dtype(): - shape = fluid.data( + shape = paddle.static.data( name="shape_tensor_list", shape=[1], dtype="bool" ) paddle.full(shape=[shape, 2], dtype="float32", fill_value=1) diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py index c78f6c35b06..2d8484cbee1 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d.py @@ -77,13 +77,13 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, @@ -114,22 +114,24 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv2d( x, weight, @@ -234,22 +236,24 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv2d( x, weight, @@ -505,7 +509,9 @@ class TestFunctionalConv2DErrorCase12(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv2d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py index 2981748cf61..dd708614b88 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv2d_transpose.py @@ -78,13 +78,13 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( 
"input", (-1, self.in_channels, -1, -1), dtype=self.dtype, @@ -115,22 +115,24 @@ class TestFunctionalConv2D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv2d_transpose( x, weight, @@ -230,22 +232,24 @@ class TestFunctionalConv2DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv2d_transpose( x, weight, @@ -513,7 +517,9 @@ class TestFunctionalConv2DErrorCase10(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv2d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py index 5e867036dd4..3f3415afdbf 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d.py @@ -77,13 +77,13 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, @@ -114,22 +114,24 @@ class TestFunctionalConv3D(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv3d( x, weight, @@ -234,22 +236,24 @@ class TestFunctionalConv3DError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) 
- weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv3d( x, weight, @@ -480,7 +484,9 @@ class TestFunctionalConv3DErrorCase11(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv3d( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py index 7a8549b1240..22aaeb02a92 100644 --- a/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py +++ b/python/paddle/fluid/tests/unittests/test_functional_conv3d_transpose.py @@ -78,13 +78,13 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = fluid.data( + x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, @@ -116,22 +116,24 @@ class TestFunctionalConv3DTranspose(TestCase): with fluid.unique_name.guard(): with fluid.program_guard(main, start): if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight.shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias.shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias.shape, dtype=self.dtype + ) y = F.conv3d_transpose( x, weight, @@ -235,22 +237,24 @@ class TestFunctionalConv3DTransposeError(TestCase): with fluid.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: - x = x = fluid.data( + x = x = paddle.static.data( "input", (-1, -1, -1, -1, self.in_channels), dtype=self.dtype, ) else: - x = fluid.data( + x = paddle.static.data( "input", (-1, self.in_channels, -1, -1, -1), dtype=self.dtype, ) - weight = fluid.data( + weight = paddle.static.data( "weight", self.weight_shape, dtype=self.dtype ) if not self.no_bias: - bias = fluid.data("bias", self.bias_shape, dtype=self.dtype) + bias = paddle.static.data( + "bias", self.bias_shape, dtype=self.dtype + ) y = F.conv3d_transpose( x, weight, @@ -538,7 +542,9 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("input", self.input.shape, dtype=paddle.float32) + x = paddle.static.data( + "input", self.input.shape, dtype=paddle.float32 + ) y = paddle.static.nn.conv3d_transpose( x, self.num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py index c5f7ef3dfb1..9bb0031c48f 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py @@ -95,8 +95,8 @@ class 
TestMNIST(TestParallelExecutorBase): class TestFuseActElewiseAddInplaceGradPass(unittest.TestCase): def build_program(self, main_program, startup_program): with paddle.static.program_guard(main_program, startup_program): - X = fluid.data(name="X", shape=[3, 3], dtype='float32') - Y = fluid.data(name="Y", shape=[3, 3], dtype='float32') + X = paddle.static.data(name="X", shape=[3, 3], dtype='float32') + Y = paddle.static.data(name="Y", shape=[3, 3], dtype='float32') Out1 = X * 5 Out2 = F.relu(Out1) prediction = paddle.tensor.math._add_with_axis(Y, Out2, axis=1) diff --git a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py index d6a7f73a925..523413865ff 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_multi_transformer_op.py @@ -797,7 +797,7 @@ class TestFusedMultiTransformerOp(OpTest): def GetFusedMultiTransformerOutStatic(self): paddle.enable_static() - x = paddle.fluid.data('x', self.query.shape, self.query.dtype) + x = paddle.static.data('x', self.query.shape, self.query.dtype) cache_kvs, cache_kv = None, None cache_kvs_feed = None time_step = None @@ -809,7 +809,7 @@ class TestFusedMultiTransformerOp(OpTest): rotary_embs = None if self.rotary_emb_dims > 0: - rotary_embs = paddle.fluid.data( + rotary_embs = paddle.static.data( 'rotary_embs', self.rotary_embs.shape, self.rotary_embs.dtype ) @@ -867,7 +867,7 @@ class TestFusedMultiTransformerOp(OpTest): time_step_feed = self.cache_length if self.remove_padding: - seq_lens = paddle.fluid.data( + seq_lens = paddle.static.data( 'seq_lens', self.seq_lens.shape, self.seq_lens.dtype ) seq_lens_feed = self.seq_lens diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py index d670822e940..ac6f1a32cca 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py @@ -237,9 +237,9 @@ class TestGatherNdError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='float32', name='x') - index = paddle.fluid.data(shape=shape, dtype='bool', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='float32', name='x') + index = paddle.static.data(shape=shape, dtype='bool', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) np_x = np.random.random(shape).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 538cbb6a54f..ec3c400d972 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -246,9 +246,9 @@ class API_TestGather(unittest.TestCase): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): - x = paddle.fluid.data('x', shape=[-1, 2], dtype='float64') - index = paddle.fluid.data('index', shape=[-1, 1], dtype='int32') - axis = paddle.fluid.data('axis', shape=[1], dtype='int32') + x = paddle.static.data('x', shape=[-1, 2], dtype='float64') + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') + axis = paddle.static.data('axis', shape=[1], dtype='int32') out = paddle.gather(x, index, axis) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -340,10 +340,10 @@ class 
TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int8', name='x') - axis = paddle.fluid.data(shape=[1], dtype='float32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + axis = paddle.static.data(shape=[1], dtype='float32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -371,9 +371,9 @@ class TestGathertError(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): shape = [8, 9, 6] - x = fluid.data(shape=shape, dtype='int8', name='x') - index = fluid.data(shape=shape, dtype='int32', name='mask') - index_float = fluid.data( + x = paddle.static.data(shape=shape, dtype='int8', name='x') + index = paddle.static.data(shape=shape, dtype='int32', name='mask') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) @@ -393,10 +393,10 @@ class TestGathertError(unittest.TestCase): ): shape = [8, 9, 6] - x = paddle.fluid.data(shape=shape, dtype='int32', name='x') - axis = paddle.fluid.data(shape=[1], dtype='int32', name='axis') - index = paddle.fluid.data(shape=shape, dtype='int32', name='index') - index_float = paddle.fluid.data( + x = paddle.static.data(shape=shape, dtype='int32', name='x') + axis = paddle.static.data(shape=[1], dtype='int32', name='axis') + index = paddle.static.data(shape=shape, dtype='int32', name='index') + index_float = paddle.static.data( shape=shape, dtype='float32', name='index_float' ) diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 44a1bf1e038..6eec8d02d35 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -212,11 +212,11 @@ class TestGaussianRandomAPI(unittest.TestCase): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500) - shape_tensor_int32 = fluid.data( + shape_tensor_int32 = paddle.static.data( name="shape_tensor_int32", shape=[2], dtype="int32" ) - shape_tensor_int64 = fluid.data( + shape_tensor_int64 = paddle.static.data( name="shape_tensor_int64", shape=[2], dtype="int64" ) diff --git a/python/paddle/fluid/tests/unittests/test_gcd.py b/python/paddle/fluid/tests/unittests/test_gcd.py index 6b600d9c6f1..7272cf3cff0 100644 --- a/python/paddle/fluid/tests/unittests/test_gcd.py +++ b/python/paddle/fluid/tests/unittests/test_gcd.py @@ -34,8 +34,12 @@ class TestGcdAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = fluid.data(name='input1', dtype='int32', shape=self.x_shape) - y = fluid.data(name='input2', dtype='int32', shape=self.y_shape) + x = paddle.static.data( + name='input1', dtype='int32', shape=self.x_shape + ) + y = paddle.static.data( + name='input2', dtype='int32', shape=self.y_shape + ) out = paddle.gcd(x, y) place = ( diff --git a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py index d1e3e6df335..8f40e391a71 100644 --- a/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py @@ -16,7 +16,7 @@ import unittest import numpy as np -import paddle.fluid as fluid +import paddle import paddle.fluid.core as core from paddle.fluid import Program, program_guard from paddle.fluid.op import Operator @@ -28,7 +28,7 @@ class TestGetTensorFromSelectedRowsError(unittest.TestCase): def test_errors(self): with program_guard(Program()): - x_var = fluid.data('X', [2, 3]) + x_var = paddle.static.data('X', [2, 3]) x_data = np.random.random((2, 4)).astype("float32") def test_Variable(): diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index e67f0cf282c..59cc1f5e24b 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -80,8 +80,10 @@ class TestGradientClip(unittest.TestCase): with fluid.program_guard( main_program=prog, startup_program=startup_program ): - image = fluid.data(name="a", shape=[-1, 784], dtype='float32') - label = fluid.data(name="b", shape=[-1, 1], dtype='int64') + image = paddle.static.data( + name="a", shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data(name="b", shape=[-1, 1], dtype='int64') if dtype != 'float32': image_cast = paddle.cast(image, dtype) hidden = paddle.static.nn.fc( @@ -134,10 +136,12 @@ class TestGradientClip(unittest.TestCase): with fluid.program_guard( main_program=prog, startup_program=startup_program ): - data = fluid.data( + data = paddle.static.data( name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.data(name="label", shape=[-1, 1], dtype="int64") + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" + ) cost = bow_net(data, label, self.word_dict_len) self.backward_and_optimize(cost) diff --git a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py index 3189fd6e11f..4f8042c8bad 100644 --- a/python/paddle/fluid/tests/unittests/test_grid_sample_function.py +++ b/python/paddle/fluid/tests/unittests/test_grid_sample_function.py @@ -50,8 +50,10 @@ class GridSampleTestCase(unittest.TestCase): start = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("x", self.x_shape, dtype=self.dtype) - grid = fluid.data("grid", self.grid_shape, dtype=self.dtype) + x = paddle.static.data("x", self.x_shape, dtype=self.dtype) + grid = paddle.static.data( + "grid", self.grid_shape, dtype=self.dtype + ) y_var = F.grid_sample( x, grid, diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py index c2ca5e7976b..06e4ef863a7 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -364,11 +364,15 @@ class TestGroupNormAPI_With_NHWC(unittest.TestCase): paddle.enable_static() def test_case1(self): - data1 = fluid.data(name='data1', shape=[None, 3, 3, 4], dtype='float64') + data1 = paddle.static.data( + name='data1', shape=[None, 3, 3, 4], dtype='float64' + ) out1 = paddle.static.nn.group_norm( input=data1, groups=2, data_layout="NHWC" ) - data2 = fluid.data(name='data2', shape=[None, 4, 3, 3], dtype='float64') + data2 = paddle.static.data( + name='data2', shape=[None, 4, 3, 3], dtype='float64' + ) out2 = paddle.static.nn.group_norm( input=data2, groups=2, data_layout="NCHW" ) @@ -399,7 +403,9 
@@ class TestGroupNormAPI_With_NHWC(unittest.TestCase):
 class TestGroupNormException(unittest.TestCase):
     # data_layout is not NHWC or NCHW
     def test_exception(self):
-        data = fluid.data(name='data', shape=[None, 3, 3, 4], dtype="float64")
+        data = paddle.static.data(
+            name='data', shape=[None, 3, 3, 4], dtype="float64"
+        )

         def attr_data_format():
             out = paddle.static.nn.group_norm(
diff --git a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
index 0a2725c1e3e..25ac2d822b0 100644
--- a/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gumbel_softmax_op.py
@@ -235,7 +235,7 @@ class TestGumbelSoftmaxAPI(unittest.TestCase):
     def test_check_api(self):
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = paddle.nn.functional.gumbel_softmax(x, hard=True)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -284,7 +284,7 @@ class TestGumbelSoftmaxOpError(unittest.TestCase):

         def test_dtype():
             with paddle.static.program_guard(paddle.static.Program()):
-                x_int32 = paddle.fluid.data(
+                x_int32 = paddle.static.data(
                     name='x_int32', shape=[2, 3], dtype='int32'
                 )
                 paddle.nn.functional.gumbel_softmax(x_int32)
diff --git a/python/paddle/fluid/tests/unittests/test_histogram_op.py b/python/paddle/fluid/tests/unittests/test_histogram_op.py
index 5d4ae29ba0c..9d437e447ff 100644
--- a/python/paddle/fluid/tests/unittests/test_histogram_op.py
+++ b/python/paddle/fluid/tests/unittests/test_histogram_op.py
@@ -29,7 +29,9 @@ class TestHistogramOpAPI(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            inputs = fluid.data(name='input', dtype='int64', shape=[2, 3])
+            inputs = paddle.static.data(
+                name='input', dtype='int64', shape=[2, 3]
+            )
             output = paddle.histogram(inputs, bins=5, min=1, max=5)
             place = fluid.CPUPlace()
             if fluid.core.is_compiled_with_cuda():
@@ -121,7 +123,9 @@ class TestHistogramOpError(unittest.TestCase):
                 TypeError, paddle.histogram, 1, bins=5, min=1, max=5
             )
             # The input type must be 'int32', 'int64', 'float32', 'float64'
-            x_bool = fluid.data(name='x_bool', shape=[4, 3], dtype='bool')
+            x_bool = paddle.static.data(
+                name='x_bool', shape=[4, 3], dtype='bool'
+            )
             self.assertRaises(
                 TypeError, paddle.histogram, x_bool, bins=5, min=1, max=5
             )
diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
index abf0ba0ac26..23b30701640 100644
--- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py
@@ -628,13 +628,13 @@ class TestHSigmoidLossAPI(unittest.TestCase):
         train_program = fluid.Program()
         startup_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            x = fluid.data('x', [-1, self.feature_size])
-            labels = fluid.data('labels', [-1, 1], 'int64')
+            x = paddle.static.data('x', [-1, self.feature_size])
+            labels = paddle.static.data('labels', [-1, 1], 'int64')
             path_table = None
             path_code = None
             if self.is_custom:
-                path_table = fluid.data('path_table', [-1, -1], 'int64')
-                path_code = fluid.data('path_code', [-1, -1], 'int64')
+                path_table = paddle.static.data('path_table', [-1, -1], 'int64')
+                path_code = paddle.static.data('path_code', [-1, -1], 'int64')
             weight_attr = paddle.nn.initializer.Assign(self.weight_np)
             bias_attr = paddle.nn.initializer.Assign(self.bias_np)
             loss = paddle.nn.HSigmoidLoss(
diff --git a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py
index d9b8ee8fad3..576da1dbd99 100644
--- a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py
@@ -127,7 +127,7 @@ class TestIdentityLossAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_shape)
+            x = paddle.static.data('X', self.x_shape)
             out1 = paddle.incubate.identity_loss(x)
             out2 = paddle.incubate.identity_loss(x, reduction=0)
             out3 = paddle.incubate.identity_loss(x, reduction=1)
@@ -174,7 +174,7 @@ class TestIdentityLossAPI(unittest.TestCase):
         )
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12], 'int32')
+            x = paddle.static.data('X', [10, 12], 'int32')
             self.assertRaises(TypeError, paddle.incubate.identity_loss, x)
diff --git a/python/paddle/fluid/tests/unittests/test_identity_op.py b/python/paddle/fluid/tests/unittests/test_identity_op.py
index 311a609dd51..ea0e7d89381 100644
--- a/python/paddle/fluid/tests/unittests/test_identity_op.py
+++ b/python/paddle/fluid/tests/unittests/test_identity_op.py
@@ -28,7 +28,7 @@ class TestIdentityAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape)
+            x = paddle.static.data('X', self.shape)
             id_layer = paddle.nn.Identity()
             out = id_layer(x)
             exe = paddle.static.Executor(self.place)
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py
index acb39868e86..86f36d2c05b 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_load_static_param.py
@@ -30,8 +30,8 @@ class TestDygraphLoadStatic(unittest.TestCase):
     def testLoadStaticModel(self):
         # static graph mode
         temp_dir = tempfile.TemporaryDirectory()
-        a = fluid.data(name="a", shape=[10, 10])
-        conv_in = fluid.data(name="conv_in", shape=[None, 10, 10, 10])
+        a = paddle.static.data(name="a", shape=[10, 10])
+        conv_in = paddle.static.data(name="conv_in", shape=[None, 10, 10, 10])

         fc_out1 = paddle.static.nn.fc(a, 10)
         fc_out2 = paddle.static.nn.fc(a, 20)
@@ -43,7 +43,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
             conv_in, num_filters=10, filter_size=5, act="relu"
         )

-        conv3d_in = fluid.data(
+        conv3d_in = paddle.static.data(
             name='conv3d_in', shape=[None, 3, 12, 32, 32], dtype='float32'
         )
         conv3d_out_1 = paddle.static.nn.conv3d(
@@ -53,37 +53,43 @@ class TestDygraphLoadStatic(unittest.TestCase):
             input=conv3d_in, num_filters=2, filter_size=3, act="relu"
         )

-        batchnorm_in = fluid.data(
+        batchnorm_in = paddle.static.data(
             name="batchnorm_in", shape=[None, 10], dtype='float32'
         )
         batchnorm_out_1 = paddle.static.nn.batch_norm(batchnorm_in)
         batchnorm_out_2 = paddle.static.nn.batch_norm(batchnorm_in)

-        emb_in = fluid.data(name='emb_in', shape=[None, 10], dtype='int64')
+        emb_in = paddle.static.data(
+            name='emb_in', shape=[None, 10], dtype='int64'
+        )
         emb_out_1 = paddle.static.nn.embedding(emb_in, [1000, 100])
         emb_out_2 = paddle.static.nn.embedding(emb_in, [2000, 200])

-        layernorm = fluid.data(name="ln", shape=[None, 10], dtype='float32')
+        layernorm = paddle.static.data(
+            name="ln", shape=[None, 10], dtype='float32'
+        )
         layernorm_1 = paddle.static.nn.layer_norm(layernorm)
         layernorm_2 = paddle.static.nn.layer_norm(layernorm)

-        nce_in = fluid.data(name="nce_in", shape=[None, 100], dtype='float32')
-        nce_label = fluid.data(
+        nce_in = paddle.static.data(
+            name="nce_in", shape=[None, 100], dtype='float32'
+        )
+        nce_label = paddle.static.data(
             name="nce_label", shape=[None, 10], dtype='int64'
         )
         nce_out_1 = paddle.static.nn.nce(nce_in, nce_label, 10000)
         nce_out_2 = paddle.static.nn.nce(nce_in, nce_label, 10000)

-        prelu_in = fluid.data(
+        prelu_in = paddle.static.data(
             name="prelu_in", shape=[None, 5, 10, 10], dtype='float32'
         )
         prelu_out_1 = paddle.static.nn.prelu(prelu_in, "channel")
         prelu_out_2 = paddle.static.nn.prelu(prelu_in, "channel")

-        bilinear_tensor_pro_x = fluid.data(
+        bilinear_tensor_pro_x = paddle.static.data(
             "t1", shape=[None, 5], dtype="float32"
         )
-        bilinear_tensor_pro_y = fluid.data(
+        bilinear_tensor_pro_y = paddle.static.data(
             "t2", shape=[None, 4], dtype="float32"
         )
@@ -98,7 +104,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
             )
         )

-        conv2d_trans_in = fluid.data(
+        conv2d_trans_in = paddle.static.data(
             name="conv2d_trans_in", shape=[None, 10, 10, 10]
         )

@@ -109,7 +115,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
             conv2d_trans_in, num_filters=10, filter_size=5, act="relu"
         )

-        conv3d_trans_in = fluid.data(
+        conv3d_trans_in = paddle.static.data(
             name='conv3d_trans_in', shape=[None, 3, 12, 32, 32], dtype='float32'
         )
         conv3d_trans_out_1 = paddle.static.nn.conv3d_transpose(
@@ -119,7 +125,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
             input=conv3d_trans_in, num_filters=2, filter_size=3, act="relu"
         )

-        groupnorm_in = fluid.data(
+        groupnorm_in = paddle.static.data(
             name='groupnorm_in', shape=[None, 8, 32, 32], dtype='float32'
         )
         groupnorm_out1 = paddle.static.nn.group_norm(
@@ -129,7 +135,7 @@ class TestDygraphLoadStatic(unittest.TestCase):
             input=groupnorm_in, groups=4, param_attr=True, bias_attr=True
         )
         '''
-        spec_norm = fluid.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32')
+        spec_norm = paddle.static.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32')
         spe_norm_out_1 = paddle.static.nn.spectral_norm(weight=spec_norm, dim=1, power_iters=2)
         spe_norm_out_2 = paddle.static.nn.spectral_norm(weight=spec_norm, dim=1, power_iters=2)
         '''
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
index 9687f0fe7a4..7192dfcf288 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
@@ -561,15 +561,15 @@ class StaticGraphTrainModel:
         self.cfg = cfg

         def create_data_layer():
-            image_real = fluid.data(
+            image_real = paddle.static.data(
                 shape=[None, 3, cfg.image_size, cfg.image_size],
                 dtype='float32',
                 name='image_real',
             )
-            label_org = fluid.data(
+            label_org = paddle.static.data(
                 shape=[None, cfg.c_dim], dtype='float32', name='label_org'
             )
-            label_trg = fluid.data(
+            label_trg = paddle.static.data(
                 shape=[None, cfg.c_dim], dtype='float32', name='label_trg'
             )
             return image_real, label_org, label_trg
diff --git a/python/paddle/fluid/tests/unittests/test_index_sample_op.py b/python/paddle/fluid/tests/unittests/test_index_sample_op.py
index d51474e9799..5d883f8bb84 100755
--- a/python/paddle/fluid/tests/unittests/test_index_sample_op.py
+++ b/python/paddle/fluid/tests/unittests/test_index_sample_op.py
@@ -136,8 +136,8 @@ class TestIndexSampleShape(unittest.TestCase):
             low=0, high=x_shape[1], size=index_shape
         ).astype(index_type)

-        x = fluid.data(name='x', shape=[-1, 5], dtype='float64')
-        index = fluid.data(name='index', shape=[-1, 3], dtype='int32')
+        x = paddle.static.data(name='x', shape=[-1, 5], dtype='float64')
+        index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32')
         output = paddle.index_sample(x=x, index=index)

         place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_inference_api.py b/python/paddle/fluid/tests/unittests/test_inference_api.py
index be470180c2f..8b126d501a4 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_api.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_api.py
@@ -79,7 +79,9 @@ def get_sample_model():
     main_program = fluid.Program()
     startup_program = fluid.Program()
     with fluid.program_guard(main_program, startup_program):
-        data = fluid.data(name="data", shape=[-1, 6, 64, 64], dtype="float32")
+        data = paddle.static.data(
+            name="data", shape=[-1, 6, 64, 64], dtype="float32"
+        )
         conv_out = paddle.static.nn.conv2d(
             input=data,
             num_filters=3,
diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py
index f87e62cb020..7e90a366554 100644
--- a/python/paddle/fluid/tests/unittests/test_initializer.py
+++ b/python/paddle/fluid/tests/unittests/test_initializer.py
@@ -668,7 +668,7 @@ class TestSetGlobalInitializer(unittest.TestCase):
             paddle.nn.initializer.Uniform(low=-0.5, high=0.5)
         )
         with fluid.program_guard(main_prog, startup_prog):
-            x = fluid.data(name="x", shape=[1, 3, 32, 32])
+            x = paddle.static.data(name="x", shape=[1, 3, 32, 32])
             # default initilizer of param in layers.conv2d is NormalInitializer
             conv = paddle.static.nn.conv2d(x, 5, 3)

@@ -696,7 +696,7 @@ class TestSetGlobalInitializer(unittest.TestCase):
             bias_init=paddle.nn.initializer.Normal(0.0, 2.0),
         )
         with fluid.program_guard(main_prog, startup_prog):
-            x = fluid.data(name="x", shape=[1, 3, 32, 32])
+            x = paddle.static.data(name="x", shape=[1, 3, 32, 32])
             # default initilizer of bias in layers.conv2d is ConstantInitializer
             conv = paddle.static.nn.conv2d(x, 5, 3)
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
index d1fc32cce1a..4408c50cc5c 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_addto_strategy.py
@@ -57,7 +57,7 @@ def create_program(data_format="NCHW"):
     main = fluid.Program()
     startup = fluid.Program()
     with fluid.program_guard(main, startup):
-        x = fluid.data(name='img', shape=[-1, 3, 224, 224])
+        x = paddle.static.data(name='img', shape=[-1, 3, 224, 224])
         x.stop_gradient = False
         if data_format == "NHWC":
             x = paddle.transpose(x, [0, 2, 3, 1])
diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
index ee5479d02f3..181d2d1a9db 100644
--- a/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op_v2.py
@@ -97,7 +97,9 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v1(x_np):
             with program_guard(Program(), Program()):
                 ins = paddle.nn.InstanceNorm2D(shape[1])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
@@ -106,7 +108,9 @@ class TestInstanceNorm(unittest.TestCase):
         def compute_v2(x_np):
             with program_guard(Program(), Program()):
                 ins = paddle.nn.InstanceNorm2D(shape[1])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ins(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py
index 656b51bce3b..bdca19eb1e8 100644
--- a/python/paddle/fluid/tests/unittests/test_inverse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py
@@ -100,7 +100,9 @@ class TestInverseAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="float64")
+            input = paddle.static.data(
+                name="input", shape=[4, 4], dtype="float64"
+            )
             result = paddle.inverse(x=input)
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.inv(input_np)
@@ -139,16 +141,20 @@ class TestInverseAPIError(unittest.TestCase):

         # The data type of input must be float32 or float64.
         for dtype in ["bool", "int32", "int64", "float16"]:
-            input = fluid.data(name='input_' + dtype, shape=[4, 4], dtype=dtype)
+            input = paddle.static.data(
+                name='input_' + dtype, shape=[4, 4], dtype=dtype
+            )
             self.assertRaises(TypeError, paddle.inverse, input)

         # When out is set, the data type must be the same as input.
-        input = fluid.data(name='input_1', shape=[4, 4], dtype="float32")
-        out = fluid.data(name='output', shape=[4, 4], dtype="float64")
+        input = paddle.static.data(
+            name='input_1', shape=[4, 4], dtype="float32"
+        )
+        out = paddle.static.data(name='output', shape=[4, 4], dtype="float64")
         self.assertRaises(TypeError, paddle.inverse, input, out)

         # The number of dimensions of input must be >= 2.
-        input = fluid.data(name='input_2', shape=[4], dtype="float32")
+        input = paddle.static.data(name='input_2', shape=[4], dtype="float32")
         self.assertRaises(ValueError, paddle.inverse, input)
@@ -160,7 +166,9 @@ class TestInverseSingularAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="float64")
+            input = paddle.static.data(
+                name="input", shape=[4, 4], dtype="float64"
+            )
             result = paddle.inverse(x=input)

             input_np = np.zeros([4, 4]).astype("float64")
diff --git a/python/paddle/fluid/tests/unittests/test_io_save_load.py b/python/paddle/fluid/tests/unittests/test_io_save_load.py
index 5ae765af9db..4e714908577 100644
--- a/python/paddle/fluid/tests/unittests/test_io_save_load.py
+++ b/python/paddle/fluid/tests/unittests/test_io_save_load.py
@@ -71,8 +71,8 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
         start_prog = fluid.Program()
         main_prog = fluid.Program()
         with fluid.program_guard(main_prog, start_prog):
-            x = fluid.data(name='x', shape=[10, 16], dtype='float32')
-            y = fluid.data(name='y', shape=[10, 16], dtype='float32')
+            x = paddle.static.data(name='x', shape=[10, 16], dtype='float32')
+            y = paddle.static.data(name='y', shape=[10, 16], dtype='float32')
             z = paddle.static.nn.fc(x, 4)

         exe = fluid.Executor(fluid.CPUPlace())
diff --git a/python/paddle/fluid/tests/unittests/test_isclose_op.py b/python/paddle/fluid/tests/unittests/test_isclose_op.py
index c587420a0ce..cf60370df9a 100644
--- a/python/paddle/fluid/tests/unittests/test_isclose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isclose_op.py
@@ -129,8 +129,12 @@ class TestIscloseStatic(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
             ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+                x = paddle.static.data(
+                    name='x', shape=[10, 10], dtype='float64'
+                )
+                y = paddle.static.data(
+                    name='y', shape=[10, 10], dtype='float64'
+                )
                 result = paddle.isclose(x, y)
                 exe = paddle.fluid.Executor(place)
                 fetches = exe.run(
@@ -167,8 +171,10 @@ class TestIscloseError(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
             ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='int32')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+                x = paddle.static.data(name='x', shape=[10, 10], dtype='int32')
+                y = paddle.static.data(
+                    name='y', shape=[10, 10], dtype='float64'
+                )
                 result = paddle.isclose(x, y)

         self.assertRaises(TypeError, test_x_dtype)
@@ -177,16 +183,18 @@ class TestIscloseError(unittest.TestCase):
             with paddle.static.program_guard(
                 paddle.static.Program(), paddle.static.Program()
            ):
-                x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-                y = paddle.fluid.data(name='y', shape=[10, 10], dtype='int32')
+                x = paddle.static.data(
+                    name='x', shape=[10, 10], dtype='float64'
+                )
+                y = paddle.static.data(name='y', shape=[10, 10], dtype='int32')
                 result = paddle.isclose(x, y)

         self.assertRaises(TypeError, test_y_dtype)

     def test_attr(self):
         paddle.enable_static()
-        x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-        y = paddle.fluid.data(name='y', shape=[10, 10], dtype='float64')
+        x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
+        y = paddle.static.data(name='y', shape=[10, 10], dtype='float64')

         def test_rtol():
             result = paddle.isclose(x, y, rtol=True)
diff --git a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
index 914d2ec1f9f..52b02e04121 100644
--- a/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_isfinite_v2_op.py
@@ -29,7 +29,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False):
         place = paddle.CUDAPlace(0)
     exe = fluid.Executor(place)
     with fluid.program_guard(main_program, startup_program):
-        x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=dtype)
+        x = paddle.static.data(name='x', shape=x_np.shape, dtype=dtype)
         res = getattr(paddle.tensor, op_str)(x)
         exe.run(startup_program)
         static_result = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
index 5d6e3af092a..154a7bf7dae 100644
--- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
@@ -114,8 +114,8 @@ class TestKLDivLossDygraph(unittest.TestCase):
         self.run_kl_loss('none')

     def test_kl_loss_static_api(self):
-        input = paddle.fluid.data(name='input', shape=[5, 20])
-        label = paddle.fluid.data(name='label', shape=[5, 20])
+        input = paddle.static.data(name='input', shape=[5, 20])
+        label = paddle.static.data(name='label', shape=[5, 20])

         paddle.nn.functional.kl_div(input, label)
         paddle.nn.functional.kl_div(input, label, 'sum')
diff --git a/python/paddle/fluid/tests/unittests/test_kron_op.py b/python/paddle/fluid/tests/unittests/test_kron_op.py
index 1c1db6a223f..5b5b63486c8 100644
--- a/python/paddle/fluid/tests/unittests/test_kron_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kron_op.py
@@ -93,8 +93,8 @@ class TestKronLayer(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                a_var = fluid.data("a", [-1, -1], dtype="float64")
-                b_var = fluid.data("b", [-1, -1], dtype="float64")
+                a_var = paddle.static.data("a", [-1, -1], dtype="float64")
+                b_var = paddle.static.data("b", [-1, -1], dtype="float64")
                 out_var = paddle.kron(a_var, b_var)

         place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_l1_loss.py b/python/paddle/fluid/tests/unittests/test_l1_loss.py
index f1bce0d1b6a..ea56eead920 100644
--- a/python/paddle/fluid/tests/unittests/test_l1_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_l1_loss.py
@@ -44,10 +44,10 @@ class TestFunctionalL1Loss(unittest.TestCase):
         self.assertTrue(dy_result.shape, [10, 10, 5])

     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
-        label = paddle.fluid.data(
+        label = paddle.static.data(
             name='label', shape=[10, 10, 5], dtype='float32'
         )
         result0 = paddle.nn.functional.l1_loss(input, label)
@@ -94,10 +94,10 @@ class TestFunctionalL1Loss(unittest.TestCase):
     # test case the raise message
     def test_errors(self):
         def test_value_error():
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[10, 10, 5], dtype='float32'
             )
-            label = paddle.fluid.data(
+            label = paddle.static.data(
                 name='label', shape=[10, 10, 5], dtype='float32'
             )
             loss = paddle.nn.functional.l1_loss(
@@ -134,10 +134,10 @@ class TestClassL1Loss(unittest.TestCase):
         self.assertTrue(dy_result.shape, [10, 10, 5])

     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
-        label = paddle.fluid.data(
+        label = paddle.static.data(
             name='label', shape=[10, 10, 5], dtype='float32'
         )
         l1_loss = paddle.nn.loss.L1Loss()
diff --git a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
index 3c97be9c42b..621559286ea 100644
--- a/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
+++ b/python/paddle/fluid/tests/unittests/test_label_smooth_functional.py
@@ -47,7 +47,7 @@ class LabelSmoothTestCase(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label_var = fluid.data(
+                label_var = paddle.static.data(
                     "input", self.label_shape, dtype=self.dtype
                 )
                 y_var = F.label_smooth(
@@ -67,7 +67,7 @@ class LabelSmoothTestCase(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label_var = fluid.data(
+                label_var = paddle.static.data(
                     "input", self.label_shape, dtype=self.dtype
                 )
                 y_var = F.label_smooth(
diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
index d91cff14a21..caddec044ba 100644
--- a/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
+++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op_v2.py
@@ -90,7 +90,9 @@ class TestDygraphLayerNormv2(unittest.TestCase):
         def compute_v1(x_np):
             with program_guard(Program(), Program()):
                 ln = paddle.nn.LayerNorm(shape[1:])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ln(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
@@ -99,7 +101,9 @@ class TestDygraphLayerNormv2(unittest.TestCase):
         def compute_v2(x_np):
             with program_guard(Program(), Program()):
                 ln = paddle.nn.LayerNorm(shape[1:])
-                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+                x = paddle.static.data(
+                    name='x', shape=x_np.shape, dtype=x_np.dtype
+                )
                 y = ln(x)
                 exe.run(fluid.default_startup_program())
                 r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 195f8d82a66..1ca2c476848 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -1414,8 +1414,10 @@ class TestLayer(LayerTest):
         x = np.random.rand(3, 32, 32).astype("float32")
         y = np.array([[1], [0], [1]])
         with self.static_graph():
-            data = fluid.data(name="input", shape=[-1, 32, 32], dtype="float32")
-            label = fluid.data(name="label", shape=[-1, 1], dtype="int")
+            data = paddle.static.data(
+                name="input", shape=[-1, 32, 32], dtype="float32"
+            )
+            label = paddle.static.data(name="label", shape=[-1, 1], dtype="int")
             data_new = paddle.reshape(data, [3, 32 * 32])
             fc_out = paddle.nn.Linear(32 * 32, 10)(data_new)
             predict = paddle.nn.functional.softmax(fc_out)
@@ -2155,8 +2157,8 @@ class TestBook(LayerTest):

     def test_partial_sum(self):
         with self.static_graph():
-            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
-            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
+            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
+            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
             sum = fluid.contrib.layers.partial_sum(
                 [x, y], start_index=0, length=2
             )
@@ -2164,7 +2166,9 @@ class TestBook(LayerTest):

     def test_batch_fc(self):
         with self.static_graph():
-            input = fluid.data(name="input", shape=[16, 2, 3], dtype="float32")
+            input = paddle.static.data(
+                name="input", shape=[16, 2, 3], dtype="float32"
+            )
             out = fluid.contrib.layers.batch_fc(
                 input=input,
                 param_size=[16, 3, 10],
@@ -2185,8 +2189,10 @@ class TestBook(LayerTest):

     def test_rank_attention(self):
         with self.static_graph():
-            input = fluid.data(name="input", shape=[None, 2], dtype="float32")
-            rank_offset = fluid.data(
+            input = paddle.static.data(
+                name="input", shape=[None, 2], dtype="float32"
+            )
+            rank_offset = paddle.static.data(
                 name="rank_offset", shape=[None, 7], dtype="int32"
             )
             out = fluid.contrib.layers.rank_attention(
@@ -2258,8 +2264,8 @@ class TestBook(LayerTest):

     def test_partial_concat(self):
         with self.static_graph():
-            x = fluid.data(name="x", shape=[None, 3], dtype="float32")
-            y = fluid.data(name="y", shape=[None, 3], dtype="float32")
+            x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
+            y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
             concat1 = fluid.contrib.layers.partial_concat(
                 [x, y], start_index=0, length=2
             )
diff --git a/python/paddle/fluid/tests/unittests/test_lcm.py b/python/paddle/fluid/tests/unittests/test_lcm.py
index 0e110468d6a..dc943a70a92 100644
--- a/python/paddle/fluid/tests/unittests/test_lcm.py
+++ b/python/paddle/fluid/tests/unittests/test_lcm.py
@@ -34,8 +34,12 @@ class TestLcmAPI(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(startup_program, train_program):
-            x1 = fluid.data(name='input1', dtype='int32', shape=self.x_shape)
-            x2 = fluid.data(name='input2', dtype='int32', shape=self.y_shape)
+            x1 = paddle.static.data(
+                name='input1', dtype='int32', shape=self.x_shape
+            )
+            x2 = paddle.static.data(
+                name='input2', dtype='int32', shape=self.y_shape
+            )
             out = paddle.lcm(x1, x2)

             place = (
diff --git a/python/paddle/fluid/tests/unittests/test_lerp_op.py b/python/paddle/fluid/tests/unittests/test_lerp_op.py
index 625d5b1b13d..cf3704f1c25 100644
--- a/python/paddle/fluid/tests/unittests/test_lerp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lerp_op.py
@@ -119,8 +119,8 @@ class TestLerpAPI(unittest.TestCase):

         def run(place):
             with paddle.static.program_guard(paddle.static.Program()):
-                x = paddle.fluid.data('x', [1, 4], dtype=self.dtype)
-                y = paddle.fluid.data('y', [1, 4], dtype=self.dtype)
+                x = paddle.static.data('x', [1, 4], dtype=self.dtype)
+                y = paddle.static.data('y', [1, 4], dtype=self.dtype)
                 out = paddle.lerp(x, y, 0.5)
                 exe = paddle.static.Executor(place)
                 res = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
index 94dc901a56d..1ea2b939e89 100644
--- a/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linalg_lstsq_op.py
@@ -97,12 +97,12 @@ class LinalgLstsqTestCase(unittest.TestCase):
             paddle.set_device(dev)
             place = fluid.CPUPlace() if dev == "cpu" else fluid.CUDAPlace(0)
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name="x",
                     shape=self._input_shape_1,
                     dtype=self._input_data_1.dtype,
                 )
-                y = paddle.fluid.data(
+                y = paddle.static.data(
                     name="y",
                     shape=self._input_shape_2,
                     dtype=self._input_data_2.dtype,
diff --git a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
index 074b0fb517a..15ea23ef2e5 100644
--- a/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
@@ -68,7 +68,7 @@ class LinalgPinvTestCase(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         for place in places:
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name="input",
                     shape=self._input_shape,
                     dtype=self._input_data.dtype,
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
index 9dfb5391f4b..327853ace04 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_op.py
@@ -331,7 +331,7 @@ class TestLinearInterpOpError(unittest.TestCase):
         with program_guard(Program(), Program()):

             def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                x1 = paddle.static.data(name="x1", shape=[1], dtype="float32")
                 out1 = paddle.nn.Upsample(
                     size=[
                         256,
@@ -342,7 +342,9 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out1_res = out1(x1)

             def data_format_error():
-                x2 = fluid.data(name="x2", shape=[1, 3, 128], dtype="float32")
+                x2 = paddle.static.data(
+                    name="x2", shape=[1, 3, 128], dtype="float32"
+                )
                 out2 = paddle.nn.Upsample(
                     size=[
                         256,
@@ -353,7 +355,9 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out2_res = out2(x2)

             def out_shape_error():
-                x3 = fluid.data(name="x3", shape=[1, 3, 128], dtype="float32")
+                x3 = paddle.static.data(
+                    name="x3", shape=[1, 3, 128], dtype="float32"
+                )
                 out3 = paddle.nn.Upsample(
                     size=[
                         256,
diff --git a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
index 563bebf87e4..b98c0f7efeb 100755
--- a/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_linear_interp_v2_op.py
@@ -405,14 +405,16 @@ class TestLinearInterpOpError(unittest.TestCase):
         with program_guard(Program(), Program()):

             def input_shape_error():
-                x1 = fluid.data(name="x1", shape=[1], dtype="float32")
+                x1 = paddle.static.data(
+                    name="x1", shape=[1], dtype="float32"
+                )
                 out1 = paddle.nn.Upsample(
                     size=[256], data_format='NCW', mode='linear'
                 )
                 out1_res = out1(x1)

             def data_format_error():
-                x2 = fluid.data(
+                x2 = paddle.static.data(
                     name="x2", shape=[1, 3, 128], dtype="float32"
                 )
                 out2 = paddle.nn.Upsample(
@@ -421,7 +423,7 @@ class TestLinearInterpOpError(unittest.TestCase):
                 out2_res = out2(x2)

             def out_shape_error():
-                x3 = fluid.data(
+                x3 = paddle.static.data(
                     name="x3", shape=[1, 3, 128], dtype="float32"
                 )
                 out3 = paddle.nn.Upsample(
diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py
index 27020dd2e0c..253be8b45b8 100644
--- a/python/paddle/fluid/tests/unittests/test_linspace.py
+++ b/python/paddle/fluid/tests/unittests/test_linspace.py
@@ -158,19 +158,21 @@ class TestLinspaceOpError(unittest.TestCase):
         self.assertRaises(TypeError, test_step_dtype)

         def test_start_dtype():
-            start = fluid.data(shape=[1], dtype="float64", name="start")
+            start = paddle.static.data(
+                shape=[1], dtype="float64", name="start"
+            )
             paddle.linspace(start, 10, 1, dtype="float32")

         self.assertRaises(ValueError, test_start_dtype)

         def test_end_dtype():
-            end = fluid.data(shape=[1], dtype="float64", name="end")
+            end = paddle.static.data(shape=[1], dtype="float64", name="end")
             paddle.linspace(0, end, 1, dtype="float32")

         self.assertRaises(ValueError, test_end_dtype)

         def test_num_dtype():
-            num = fluid.data(shape=[1], dtype="int32", name="step")
+            num = paddle.static.data(shape=[1], dtype="int32", name="step")
             paddle.linspace(0, 10, num, dtype="float32")

         self.assertRaises(TypeError, test_step_dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
index c52e534f813..11ae9e855f6 100644
--- a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
+++ b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py
@@ -80,10 +80,12 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
             startup_program = fluid.default_startup_program()
             main_program = fluid.default_main_program()

-            img = fluid.data(
+            img = paddle.static.data(
                 name='img', shape=[None, 1, 28, 28], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             prediction, avg_loss = static_train_net(img, label)
diff --git a/python/paddle/fluid/tests/unittests/test_log_softmax.py b/python/paddle/fluid/tests/unittests/test_log_softmax.py
index 274d4cf05bd..9a8f336c930 100644
--- a/python/paddle/fluid/tests/unittests/test_log_softmax.py
+++ b/python/paddle/fluid/tests/unittests/test_log_softmax.py
@@ -151,7 +151,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
         logsoftmax = paddle.nn.LogSoftmax(axis)
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = logsoftmax(x)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -185,7 +185,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
         x = x.astype(dtype)
         ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = F.log_softmax(x, axis, dtype)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -204,10 +204,10 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):

     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
+            x = paddle.static.data(name='X1', shape=[100], dtype='int32')
             self.assertRaises(TypeError, F.log_softmax, x)

-            x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
+            x = paddle.static.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')
diff --git a/python/paddle/fluid/tests/unittests/test_logit_op.py b/python/paddle/fluid/tests/unittests/test_logit_op.py
index 0744b779fb4..597f8fe197f 100644
--- a/python/paddle/fluid/tests/unittests/test_logit_op.py
+++ b/python/paddle/fluid/tests/unittests/test_logit_op.py
@@ -90,7 +90,7 @@ class TestLogitAPI(unittest.TestCase):
         ref_out = logit(self.x, eps)
         # test static api
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='x', shape=self.x_shape)
+            x = paddle.static.data(name='x', shape=self.x_shape)
             y = paddle.logit(x, eps)
             exe = paddle.static.Executor(self.place)
             out = exe.run(feed={'x': self.x}, fetch_list=[y])
@@ -110,10 +110,10 @@ class TestLogitAPI(unittest.TestCase):

     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
+            x = paddle.static.data(name='X1', shape=[100], dtype='int32')
             self.assertRaises(TypeError, paddle.logit, x)

-            x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
+            x = paddle.static.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, paddle.logit, x, dtype='int32')
diff --git a/python/paddle/fluid/tests/unittests/test_logsumexp.py b/python/paddle/fluid/tests/unittests/test_logsumexp.py
index d65431f7417..41936d17b3d 100644
--- a/python/paddle/fluid/tests/unittests/test_logsumexp.py
+++ b/python/paddle/fluid/tests/unittests/test_logsumexp.py
@@ -189,7 +189,7 @@ class TestLogsumexpError(unittest.TestCase):
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             self.assertRaises(TypeError, paddle.logsumexp, 1)
-            x1 = paddle.fluid.data(name='x1', shape=[120], dtype="int32")
+            x1 = paddle.static.data(name='x1', shape=[120], dtype="int32")
             self.assertRaises(TypeError, paddle.logsumexp, x1)

@@ -206,7 +206,7 @@ class TestLogsumexpAPI(unittest.TestCase):
     def api_case(self, axis=None, keepdim=False):
         out_ref = ref_logsumexp(self.x, axis, keepdim)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape)
+            x = paddle.static.data('X', self.shape)
             out = paddle.logsumexp(x, axis, keepdim)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x}, fetch_list=[out])
diff --git a/python/paddle/fluid/tests/unittests/test_lookahead.py b/python/paddle/fluid/tests/unittests/test_lookahead.py
index 5860f81f736..d3647e50af7 100644
--- a/python/paddle/fluid/tests/unittests/test_lookahead.py
+++ b/python/paddle/fluid/tests/unittests/test_lookahead.py
@@ -35,7 +35,9 @@ class TestLookAhead(unittest.TestCase):
         startup = fluid.Program()
         with fluid.program_guard(train_program, startup):
             with fluid.unique_name.guard():
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+                data = paddle.static.data(
+                    name='X', shape=[None, 1], dtype='float32'
+                )
                 hidden = paddle.static.nn.fc(x=data, size=10)
                 loss = paddle.mean(hidden)
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index 1892ce5c56d..4802add0167 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from op_test import OpTest, check_out_dtype, skip_check_grad_ci

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
@@ -168,21 +169,25 @@ class TestEmbedOpError(unittest.TestCase):

         def test_input_dtype():
             # the input dtype must be int64
-            input = fluid.data(name='x', shape=[4, 1], dtype='float32')
+            input = paddle.static.data(
+                name='x', shape=[4, 1], dtype='float32'
+            )
             fluid.layers.embedding(input=input, size=(10, 64))

         self.assertRaises(TypeError, test_input_dtype)

         def test_param_dtype():
             # dtype must be float32 or float64
-            input2 = fluid.data(name='x2', shape=[4, 1], dtype='int64')
+            input2 = paddle.static.data(
+                name='x2', shape=[4, 1], dtype='int64'
+            )
             fluid.layers.embedding(
                 input=input2, size=(10, 64), dtype='int64'
             )

         self.assertRaises(TypeError, test_param_dtype)

-        input3 = fluid.data(name='x3', shape=[4, 1], dtype='int64')
+        input3 = paddle.static.data(name='x3', shape=[4, 1], dtype='int64')
         fluid.layers.embedding(input=input3, size=(10, 64), dtype='float16')
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
index dc0c8f3174b..5f8eba06003 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -275,20 +275,24 @@ class TestEmbedOpError(unittest.TestCase):

         def test_input_dtype():
             # the input dtype must be int64
-            input = fluid.data(name='x1', shape=[4, 6], dtype='float32')
+            input = paddle.static.data(
+                name='x1', shape=[4, 6], dtype='float32'
+            )
             paddle.static.nn.embedding(input=input, size=(10, 64))

         self.assertRaises(TypeError, test_input_dtype)

         def test_param_dtype():
             # dtype must be float32 or float64
-            input2 = fluid.data(name='x2', shape=[4, 6], dtype='int64')
+            input2 = paddle.static.data(
+                name='x2', shape=[4, 6], dtype='int64'
+            )
             paddle.static.nn.embedding(
                 input=input2, size=(10, 64), dtype='int64'
             )

         self.assertRaises(TypeError, test_param_dtype)

-        input3 = fluid.data(name='x3', shape=[4, 6], dtype='int64')
+        input3 = paddle.static.data(name='x3', shape=[4, 6], dtype='int64')
         paddle.static.nn.embedding(
             input=input3, size=(10, 64), dtype='float16'
         )
diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py
index 25d90c07838..e80e89ff3a1 100644
--- a/python/paddle/fluid/tests/unittests/test_lrn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py
@@ -120,10 +120,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
             in_np1 = np.random.random([3, 40, 40]).astype("float32")
             in_np2 = np.transpose(in_np1, (0, 2, 1))

-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 40, 40], dtype="float32"
             )
             res1 = paddle.nn.functional.local_response_norm(
@@ -144,10 +144,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):

     def check_static_4d_input(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 40, 40, 3], dtype="float32"
             )

@@ -173,10 +173,10 @@ class TestLocalResponseNormFAPI(unittest.TestCase):

     def check_static_5d_input(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input1 = fluid.data(
+            input1 = paddle.static.data(
                 name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
             )
-            input2 = fluid.data(
+            input2 = paddle.static.data(
                 name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
             )
             res1 = paddle.nn.functional.local_response_norm(
@@ -280,13 +280,17 @@ class TestLocalResponseNormFAPIError(unittest.TestCase):

         self.assertRaises(TypeError, test_Variable)

         def test_datatype():
-            x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="int32")
+            x = paddle.static.data(
+                name='x', shape=[3, 4, 5, 6], dtype="int32"
+            )
             paddle.nn.functional.local_response_norm(x, size=5)

         self.assertRaises(TypeError, test_datatype)

         def test_dataformat():
-            x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="float32")
+            x = paddle.static.data(
+                name='x', shape=[3, 4, 5, 6], dtype="float32"
+            )
             paddle.nn.functional.local_response_norm(
                 x, size=5, data_format="NCTHW"
             )
@@ -294,7 +298,7 @@ class TestLocalResponseNormFAPIError(unittest.TestCase):
         self.assertRaises(ValueError, test_dataformat)

         def test_dim():
-            x = fluid.data(name='x', shape=[3, 4], dtype="float32")
+            x = paddle.static.data(name='x', shape=[3, 4], dtype="float32")
             paddle.nn.functional.local_response_norm(x, size=5)

         self.assertRaises(ValueError, test_dim)
diff --git a/python/paddle/fluid/tests/unittests/test_lu_op.py b/python/paddle/fluid/tests/unittests/test_lu_op.py
index 3e083c76b71..ddba55e7d3f 100644
--- a/python/paddle/fluid/tests/unittests/test_lu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lu_op.py
@@ -273,7 +273,7 @@ class TestLUAPI(unittest.TestCase):
                 NsU = np.pad(sU, upad)
                 NLU = NsL + NsU

-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name="input", shape=shape, dtype=dtype
                 )
                 lu, p = paddle.linalg.lu(x, pivot=pivot)
diff --git a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
index d05b16df25c..2349f8251f2 100644
--- a/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lu_unpack_op.py
@@ -142,7 +142,7 @@ class TestLU_UnpackOp(OpTest):
             place = fluid.CPUPlace()
             if core.is_compiled_with_cuda():
                 place = fluid.CUDAPlace(0)
-            xv = paddle.fluid.data(
+            xv = paddle.static.data(
                 name="input", shape=self.x_shape, dtype=self.dtype
             )
             lu, p = paddle.linalg.lu(xv)
@@ -278,7 +278,7 @@ class TestLU_UnpackAPI(unittest.TestCase):
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
                     sP, sL, sU = scipy_lu_unpack(a)

-                    x = paddle.fluid.data(
+                    x = paddle.static.data(
                         name="input", shape=shape, dtype=dtype
                     )
                     lu, p = paddle.linalg.lu(x)
diff --git a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
index bf08137b100..378c91aa8aa 100644
--- a/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py
@@ -87,9 +87,11 @@ class TestMarginRankLossLayer(unittest.TestCase):
         start = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, start):
-                label = fluid.data("label", (self.batch_size, 1), "float32")
-                x1 = fluid.data("x1", (self.batch_size, 1), "float32")
-                x2 = fluid.data("x2", (self.batch_size, 1), "float32")
+                label = paddle.static.data(
+                    "label", (self.batch_size, 1), "float32"
+                )
+                x1 = paddle.static.data("x1", (self.batch_size, 1), "float32")
+                x2 = paddle.static.data("x2", (self.batch_size, 1), "float32")
                 out = paddle.nn.functional.margin_ranking_loss(
                     x1, x2, label, self.margin, 'none'
                 )
diff --git a/python/paddle/fluid/tests/unittests/test_masked_select_op.py b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
index 14d06a3d36b..63446d865cb 100644
--- a/python/paddle/fluid/tests/unittests/test_masked_select_op.py
+++ b/python/paddle/fluid/tests/unittests/test_masked_select_op.py
@@ -74,8 +74,8 @@ class TestMaskedSelectAPI(unittest.TestCase):

     def test_static_mode(self):
         shape = [8, 9, 6]
-        x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-        mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
+        x = paddle.static.data(shape=shape, dtype='float32', name='x')
+        mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
         np_x = np.random.random(shape).astype('float32')
         np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))

@@ -99,9 +99,9 @@ class TestMaskedSelectError(unittest.TestCase):
         ):
             shape = [8, 9, 6]
-            x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-            mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
-            mask_float = paddle.fluid.data(
+            x = paddle.static.data(shape=shape, dtype='float32', name='x')
+            mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
+            mask_float = paddle.static.data(
                 shape=shape, dtype='float32', name='mask_float'
             )
             np_x = np.random.random(shape).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 1ac71759de5..fc4bbff2c57 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -164,9 +164,9 @@ for dim in [4]:
 class API_TestMm(unittest.TestCase):
     def test_out(self):
         with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[2], dtype="float64")
-            y = fluid.data(name='y', shape=[2], dtype='float64')
-            res = fluid.data(name="output", shape=[1], dtype="float64")
+            x = paddle.static.data(name="x", shape=[2], dtype="float64")
+            y = paddle.static.data(name='y', shape=[2], dtype='float64')
+            res = paddle.static.data(name="output", shape=[1], dtype="float64")
             result = paddle.mm(x, y)
             exe = fluid.Executor(fluid.CPUPlace())
             data1 = np.random.rand(2)
@@ -215,18 +215,22 @@ class API_TestMmError(unittest.TestCase):
     def test_errors(self):
         def test_error1():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32")
-                data2 = fluid.data(name="data2", shape=[3, 10], dtype="float32")
+                data1 = paddle.static.data(
+                    name="data1", shape=[10, 2], dtype="float32"
+                )
+                data2 = paddle.static.data(
+                    name="data2", shape=[3, 10], dtype="float32"
+                )
                 paddle.mm(data1, data2)

         self.assertRaises(ValueError, test_error1)

         def test_error2():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(
+                data1 = paddle.static.data(
                     name="data1", shape=[-1, 10, 2], dtype="float32"
                 )
-                data2 = fluid.data(
+                data2 = paddle.static.data(
                     name="data2", shape=[-1, 2, 10], dtype="float32"
                 )
                 paddle.mm(data1, data2)
@@ -235,10 +239,10 @@ class API_TestMmError(unittest.TestCase):

         def test_error3():
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data1 = fluid.data(
+                data1 = paddle.static.data(
                     name="data1", shape=[10, 10, 2], dtype="float32"
                 )
-                data2 = fluid.data(
+                data2 = paddle.static.data(
                     name="data2", shape=[3, 2, 10], dtype="float32"
                 )
                 paddle.mm(data1, data2)
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index 0338517399c..88831ffeb88 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -464,8 +464,12 @@ class TestMatMulV2API(unittest.TestCase):

     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = fluid.data(name="input_x", shape=[4, 3], dtype="float32")
-            input_y = fluid.data(name="input_y", shape=[3, 4], dtype="float32")
+            input_x = paddle.static.data(
+                name="input_x", shape=[4, 3], dtype="float32"
+            )
+            input_y = paddle.static.data(
+                name="input_y", shape=[3, 4], dtype="float32"
+            )
             result = paddle.matmul(input_x, input_y)
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
index 24ac8253d43..7a0b12892e4 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_nms_op.py
@@ -19,7 +19,6 @@ import numpy as np
 from op_test import OpTest

 import paddle
-import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard

@@ -329,10 +328,10 @@ class TestMatrixNMSError(unittest.TestCase):
         scores_np = np.transpose(scores, (0, 2, 1))

         with program_guard(Program(), Program()):
-            boxes_data = fluid.data(
+            boxes_data = paddle.static.data(
                 name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32'
             )
-            scores_data = fluid.data(
+            scores_data = paddle.static.data(
                 name='scores', shape=[N, C, M], dtype='float32'
             )
diff --git a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
index 8296aa320f5..0e638bb60e7 100644
--- a/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matrix_power_op.py
@@ -249,7 +249,9 @@ class TestMatrixPowerAPI(unittest.TestCase):

     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_x = fluid.data(name="input_x", shape=[4, 4], dtype="float64")
+            input_x = paddle.static.data(
+                name="input_x", shape=[4, 4], dtype="float64"
+            )
             result = paddle.linalg.matrix_power(x=input_x, n=-2)
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.matrix_power(input_np, -2)
@@ -290,35 +292,45 @@ class TestMatrixPowerAPIError(unittest.TestCase):

         # n must be int
         for n in [2.0, '2', -2.0]:
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input_float32", shape=[4, 4], dtype='float32'
             )
             self.assertRaises(TypeError, paddle.linalg.matrix_power, input, n)

         # The data type of input must be float32 or float64.
         for dtype in ["bool", "int32", "int64", "float16"]:
-            input = fluid.data(name="input_" + dtype, shape=[4, 4], dtype=dtype)
+            input = paddle.static.data(
+                name="input_" + dtype, shape=[4, 4], dtype=dtype
+            )
             self.assertRaises(TypeError, paddle.linalg.matrix_power, input, 2)

         # When out is set, the data type must be the same as input.
-        input = fluid.data(name="input_1", shape=[4, 4], dtype="float32")
-        out = fluid.data(name="output", shape=[4, 4], dtype="float64")
+        input = paddle.static.data(
+            name="input_1", shape=[4, 4], dtype="float32"
+        )
+        out = paddle.static.data(name="output", shape=[4, 4], dtype="float64")
         self.assertRaises(TypeError, paddle.linalg.matrix_power, input, 2, out)

         # The number of dimensions of input must be >= 2.
- input = fluid.data(name="input_2", shape=[4], dtype="float32") + input = paddle.static.data(name="input_2", shape=[4], dtype="float32") self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2) # The inner-most 2 dimensions of input should be equal to each other - input = fluid.data(name="input_3", shape=[4, 5], dtype="float32") + input = paddle.static.data( + name="input_3", shape=[4, 5], dtype="float32" + ) self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2) # The size of input should not be 0 - input = fluid.data(name="input_4", shape=[1, 1, 0, 0], dtype="float32") + input = paddle.static.data( + name="input_4", shape=[1, 1, 0, 0], dtype="float32" + ) self.assertRaises(ValueError, paddle.linalg.matrix_power, input, 2) # The size of input should not be 0 - input = fluid.data(name="input_5", shape=[0, 0], dtype="float32") + input = paddle.static.data( + name="input_5", shape=[0, 0], dtype="float32" + ) self.assertRaises( ValueError, paddle.linalg.matrix_power, input, -956301312 ) @@ -332,7 +344,9 @@ class TestMatrixPowerSingularAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[4, 4], dtype="float64") + input = paddle.static.data( + name="input", shape=[4, 4], dtype="float64" + ) result = paddle.linalg.matrix_power(x=input, n=-2) input_np = np.zeros([4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py index 86e751336e6..5e740a45b64 100644 --- a/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py +++ b/python/paddle/fluid/tests/unittests/test_matrix_rank_op.py @@ -175,10 +175,10 @@ class TestMatrixRankAPI(unittest.TestCase): with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) tol_np = np.random.random([3, 4]).astype(np.float32) - x_pd = paddle.fluid.data( + x_pd = paddle.static.data( name="X", shape=[3, 4, 7, 7], dtype='float64' ) - tol_pd = paddle.fluid.data( + tol_pd = paddle.static.data( name="TolTensor", shape=[3, 4], dtype='float32' ) rank_np = np.linalg.matrix_rank(x_np, tol_np, hermitian=False) @@ -196,7 +196,7 @@ class TestMatrixRankAPI(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) - x_pd = paddle.fluid.data( + x_pd = paddle.static.data( name="X", shape=[3, 4, 7, 7], dtype='float64' ) rank_np = np.linalg.matrix_rank(x_np, hermitian=True) @@ -212,7 +212,7 @@ class TestMatrixRankAPI(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) - x_pd = paddle.fluid.data( + x_pd = paddle.static.data( name="X", shape=[3, 4, 7, 7], dtype='float64' ) rank_np = np.linalg.matrix_rank(x_np, 0.1, hermitian=False) diff --git a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py index 679dc7060f7..52cfa21424e 100644 --- a/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_min_amax_amin_op.py @@ -95,7 +95,9 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(startup_program, train_program): - x = fluid.data(name='input', dtype=self.dtype, shape=self.shape) + 
x = paddle.static.data( + name='input', dtype=self.dtype, shape=self.shape + ) x.stop_gradient = False out = self._choose_paddle_func(func, x) diff --git a/python/paddle/fluid/tests/unittests/test_maxout_op.py b/python/paddle/fluid/tests/unittests/test_maxout_op.py index 7756f7d4ae8..0c3fb620f2c 100644 --- a/python/paddle/fluid/tests/unittests/test_maxout_op.py +++ b/python/paddle/fluid/tests/unittests/test_maxout_op.py @@ -97,7 +97,7 @@ class TestMaxoutAPI(unittest.TestCase): def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.maxout(x, self.groups, self.axis) m = paddle.nn.Maxout(self.groups, self.axis) out2 = m(x) @@ -127,12 +127,12 @@ class TestMaxoutAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.maxout, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 4, 6, 8], dtype='int32' ) self.assertRaises(TypeError, F.maxout, x_int32) - x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8]) + x_float32 = paddle.static.data(name='x_float32', shape=[2, 4, 6, 8]) self.assertRaises(ValueError, F.maxout, x_float32, 2, 2) diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 042ea928379..56f8f40a953 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -411,7 +411,7 @@ class TestMeanAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_shape) + x = paddle.static.data('X', self.x_shape) out1 = paddle.mean(x) out2 = paddle.tensor.mean(x) out3 = paddle.tensor.stat.mean(x) @@ -452,7 +452,7 @@ class TestMeanAPI(unittest.TestCase): def test_fluid_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data("x", shape=[10, 10], dtype="float32") + x = paddle.static.data("x", shape=[10, 10], dtype="float32") out = paddle.mean(x=x, axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -476,7 +476,7 @@ class TestMeanAPI(unittest.TestCase): self.assertRaises(Exception, paddle.mean, x, 2) paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [10, 12], 'int32') + x = paddle.static.data('X', [10, 12], 'int32') self.assertRaises(TypeError, paddle.mean, x) diff --git a/python/paddle/fluid/tests/unittests/test_median.py b/python/paddle/fluid/tests/unittests/test_median.py index 47a14fbf889..738f98ed782 100644 --- a/python/paddle/fluid/tests/unittests/test_median.py +++ b/python/paddle/fluid/tests/unittests/test_median.py @@ -36,7 +36,7 @@ class TestMedian(unittest.TestCase): startup_program = Program() exe = paddle.static.Executor() with program_guard(main_program, startup_program): - x_in = paddle.fluid.data(shape=x.shape, dtype=x.dtype, name='x') + x_in = paddle.static.data(shape=x.shape, dtype=x.dtype, name='x') y = paddle.median(x_in, axis, keepdims) [res_pd] = exe.run(feed={'x': x}, fetch_list=[y]) self.check_numpy_res(res_pd, res_np) diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py index d74d3ea4fbc..1c08d0bc83d 100644 --- 
diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
index d74d3ea4fbc..1c08d0bc83d 100644
--- a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
+++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -71,8 +71,8 @@ class TestMeshgridOp2(TestMeshgridOp):
 class TestMeshgridOp3(unittest.TestCase):
     def test_api(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
         input_1 = np.random.randint(
             0,
@@ -107,8 +107,8 @@ class TestMeshgridOp3(unittest.TestCase):
 class TestMeshgridOp4(unittest.TestCase):
     def test_list_input(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
         input_1 = np.random.randint(
             0,
@@ -144,8 +144,8 @@ class TestMeshgridOp4(unittest.TestCase):
 class TestMeshgridOp5(unittest.TestCase):
     def test_tuple_input(self):
-        x = fluid.data(shape=[100], dtype='int32', name='x')
-        y = fluid.data(shape=[200], dtype='int32', name='y')
+        x = paddle.static.data(shape=[100], dtype='int32', name='x')
+        y = paddle.static.data(shape=[200], dtype='int32', name='y')
         input_1 = np.random.randint(
             0,
diff --git a/python/paddle/fluid/tests/unittests/test_modelaverage.py b/python/paddle/fluid/tests/unittests/test_modelaverage.py
index 156f0cfb8bc..8fe65891707 100644
--- a/python/paddle/fluid/tests/unittests/test_modelaverage.py
+++ b/python/paddle/fluid/tests/unittests/test_modelaverage.py
@@ -32,7 +32,9 @@ class TestModelAverage(unittest.TestCase):
         test_program = fluid.Program()
         with fluid.program_guard(train_program, startup):
             with fluid.unique_name.guard():
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+                data = paddle.static.data(
+                    name='X', shape=[None, 1], dtype='float32'
+                )
                 hidden = paddle.static.nn.fc(x=data, size=10)
                 loss = paddle.mean(hidden)
         test_program = train_program.clone()
diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index 75dafb1ea35..9b96affb274 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -30,8 +30,12 @@ class TestMseLoss(unittest.TestCase):
         sub = input_val - label_val
         np_result = np.mean(sub * sub)
-        input_var = fluid.data(name="input", shape=[-1, 3], dtype="float32")
-        label_var = fluid.data(name="label", shape=[-1, 3], dtype="float32")
+        input_var = paddle.static.data(
+            name="input", shape=[-1, 3], dtype="float32"
+        )
+        label_var = paddle.static.data(
+            name="label", shape=[-1, 3], dtype="float32"
+        )
         output = paddle.nn.functional.mse_loss(input=input_var, label=label_var)
         for use_cuda in (
@@ -52,13 +56,17 @@ class TestMseInvalidInput(unittest.TestCase):
     def test_error(self):
         def test_invalid_input():
             input = [256, 3]
-            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
+            label = paddle.static.data(
+                name='label1', shape=[None, 3], dtype='float32'
+            )
             loss = paddle.nn.functional.mse_loss(input, label)
         self.assertRaises(TypeError, test_invalid_input)
         def test_invalid_label():
-            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
+            input = paddle.static.data(
+                name='input1', shape=[None, 3], dtype='float32'
+            )
             label = [256, 3]
             loss = paddle.nn.functional.mse_loss(input, label)
@@ -219,10 +227,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
         )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'mean')
@@ -261,10 +269,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
         )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'sum')
@@ -303,10 +311,10 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
             else paddle.CPUPlace()
         )
         with paddle.static.program_guard(prog, startup_prog):
-            input = paddle.fluid.data(
+            input = paddle.static.data(
                 name='input', shape=dim, dtype='float32'
             )
-            target = paddle.fluid.data(
+            target = paddle.static.data(
                 name='target', shape=dim, dtype='float32'
             )
             mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')
diff --git a/python/paddle/fluid/tests/unittests/test_multinomial_op.py b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
index c251dc696cd..4738496a2f4 100644
--- a/python/paddle/fluid/tests/unittests/test_multinomial_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multinomial_op.py
@@ -169,7 +169,7 @@ class TestMultinomialApi(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            x = fluid.data('x', shape=[4], dtype='float32')
+            x = paddle.static.data('x', shape=[4], dtype='float32')
             out = paddle.multinomial(x, num_samples=100000, replacement=True)
             place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
index a0f8932ba23..184e5597e85 100644
--- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
@@ -63,9 +63,11 @@ class TestMultiplexOp(OpTest):
 class TestMultiplexOpError(unittest.TestCase):
     def test_errors(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64')
-            x2 = fluid.data(name='x2', shape=[None, 2], dtype='int64')
-            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
+            x1 = paddle.static.data(name='x1', shape=[None, 2], dtype='int64')
+            x2 = paddle.static.data(name='x2', shape=[None, 2], dtype='int64')
+            index = paddle.static.data(
+                name='index', shape=[None, 1], dtype='int32'
+            )
             def test_list():
                 # the inputs type must be list
@@ -79,14 +81,18 @@ class TestMultiplexOpError(unittest.TestCase):
             self.assertRaises(ValueError, test_len)
             def test_type():
-                y1 = fluid.data(name='y1', shape=[None, 2], dtype='int16')
-                y2 = fluid.data(name='y2', shape=[None, 2], dtype='int16')
+                y1 = paddle.static.data(
+                    name='y1', shape=[None, 2], dtype='int16'
+                )
+                y2 = paddle.static.data(
+                    name='y2', shape=[None, 2], dtype='int16'
+                )
                 paddle.multiplex(inputs=[y1, y2], index=index)
             self.assertRaises(TypeError, test_type)
             def test_type2():
-                index2 = fluid.data(
+                index2 = paddle.static.data(
                     name='index2', shape=[None, 1], dtype='int16'
                 )
                 paddle.multiplex(inputs=[x1, x2], index=index2)
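One detail worth calling out for hunks like the `x_float32` case in test_maxout_op.py above: `paddle.static.data` accepts `dtype=None`, in which case the placeholder falls back to the global default dtype. A small sketch of that behavior (my own illustration, not patch content):

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # With dtype omitted, the placeholder uses the global default dtype
    # (float32 unless changed via paddle.set_default_dtype).
    x = paddle.static.data(name='x', shape=[2, 4, 6, 8])
    print(x.dtype)  # expected to report float32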
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
index 40b6f49a253..639ca491789 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py
@@ -52,10 +52,12 @@ def simple_fc_net_static():
     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
-            image = fluid.data(
+            image = paddle.static.data(
                 name='image', shape=[None, IMAGE_SIZE], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = image
             param_attr = fluid.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(value=0.8)
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
index f4585fe3622..42b33346083 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py
@@ -52,10 +52,12 @@ def simple_fc_net_static():
     with fluid.unique_name.guard():
         with fluid.program_guard(main_prog, startup_prog):
-            image = fluid.data(
+            image = paddle.static.data(
                 name='image', shape=[None, IMAGE_SIZE], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             hidden = image
             param_attr = fluid.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(value=0.8)
@@ -192,10 +194,10 @@ class TestStaticDataLoader(unittest.TestCase):
 class TestStaticDataLoaderReturnList(unittest.TestCase):
     def run_single_place(self, num_workers):
         scope = fluid.Scope()
-        image = fluid.data(
+        image = paddle.static.data(
             name='image', shape=[None, IMAGE_SIZE], dtype='float32'
         )
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
         with fluid.scope_guard(scope):
             dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
             dataloader = DataLoader(
@@ -215,10 +217,10 @@ class TestStaticDataLoaderReturnList(unittest.TestCase):
     def run_multi_place(self, num_workers):
         scope = fluid.Scope()
-        image = fluid.data(
+        image = paddle.static.data(
             name='image', shape=[None, IMAGE_SIZE], dtype='float32'
         )
-        label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+        label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
         with fluid.scope_guard(scope):
             dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
             dataloader = DataLoader(
diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
index 03257b75fb3..5edae8c5baa 100644
--- a/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
+++ b/python/paddle/fluid/tests/unittests/test_multiprocess_reader_exception.py
@@ -53,7 +53,9 @@ class TestMultiprocessReaderExceptionWithQueueSuccess(unittest.TestCase):
             return __impl__
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            image = fluid.data(name='image', dtype='float32', shape=[None, 10])
+            image = paddle.static.data(
+                name='image', dtype='float32', shape=[None, 10]
+            )
             reader = fluid.io.DataLoader.from_generator(
                 feed_list=[image], capacity=2, iterable=iterable
             )
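The DataLoader hunks above only swap the placeholder API; the `from_generator` wiring itself is untouched. A rough standalone sketch of that wiring with the migrated placeholder (my own example with made-up data, not patch content):

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()
image = paddle.static.data(name='image', dtype='float32', shape=[None, 10])
loader = fluid.io.DataLoader.from_generator(
    feed_list=[image], capacity=2, iterable=True
)

def batches():
    # Yield a list with one numpy array per variable in feed_list.
    for _ in range(3):
        yield [np.random.rand(4, 10).astype('float32')]

loader.set_batch_generator(batches, places=fluid.cpu_places())
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
for feed in loader():
    exe.run(paddle.static.default_main_program(), feed=feed)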
diff --git a/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py b/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
index 7db79e4e80e..fc17cc913d0 100644
--- a/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nan_to_num_op.py
@@ -71,7 +71,7 @@ class TestNanToNum(unittest.TestCase):
         out4_np = np_nan_to_num(x_np, 1.0, 9.0, -12.0)
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', x_np.shape)
+            x = paddle.static.data('X', x_np.shape)
             out1 = paddle.nan_to_num(x)
             out2 = paddle.nan_to_num(x, 1.0)
             out3 = paddle.nan_to_num(x, 1.0, 9.0)
diff --git a/python/paddle/fluid/tests/unittests/test_nanmean_api.py b/python/paddle/fluid/tests/unittests/test_nanmean_api.py
index 368251520fe..8c0a335c268 100644
--- a/python/paddle/fluid/tests/unittests/test_nanmean_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nanmean_api.py
@@ -41,7 +41,7 @@ class TestNanmeanAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_shape)
+            x = paddle.static.data('X', self.x_shape)
             out1 = paddle.nanmean(x)
             out2 = paddle.tensor.nanmean(x)
             out3 = paddle.tensor.math.nanmean(x)
@@ -90,7 +90,7 @@ class TestNanmeanAPI(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [10, 12], 'int32')
+            x = paddle.static.data('X', [10, 12], 'int32')
             self.assertRaises(TypeError, paddle.nanmean, x)
     def test_api_dygraph_grad(self):
diff --git a/python/paddle/fluid/tests/unittests/test_nanmedian.py b/python/paddle/fluid/tests/unittests/test_nanmedian.py
index aeceadb0ea9..7f5ecb1865c 100644
--- a/python/paddle/fluid/tests/unittests/test_nanmedian.py
+++ b/python/paddle/fluid/tests/unittests/test_nanmedian.py
@@ -83,7 +83,7 @@ class TestNanmedian(unittest.TestCase):
         paddle.enable_static()
         np_res = np.nanmedian(data, keepdims=True)
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', data.shape)
+            x = paddle.static.data('X', data.shape)
             out1 = paddle.nanmedian(x, keepdim=True)
             out2 = paddle.tensor.nanmedian(x, keepdim=True)
             out3 = paddle.tensor.stat.nanmedian(x, keepdim=True)
@@ -151,10 +151,10 @@ class TestNanmedian(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data("X", [10, 12])
+            x = paddle.static.data("X", [10, 12])
             def test_dtype():
-                x2 = paddle.fluid.data('X2', [10, 12], 'bool')
+                x2 = paddle.static.data('X2', [10, 12], 'bool')
                 paddle.nanmedian(x2)
             def test_empty_axis():
diff --git a/python/paddle/fluid/tests/unittests/test_nansum_api.py b/python/paddle/fluid/tests/unittests/test_nansum_api.py
index 87b05b4245d..3cd9005e000 100644
--- a/python/paddle/fluid/tests/unittests/test_nansum_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nansum_api.py
@@ -26,7 +26,9 @@ class API_Test_Nansum(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            input = fluid.data(name='input', dtype='float32', shape=[2, 4])
+            input = paddle.static.data(
+                name='input', dtype='float32', shape=[2, 4]
+            )
             out1 = paddle.nansum(input)
             out2 = paddle.nansum(input, axis=0)
             out3 = paddle.nansum(input, axis=-1)
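For context on the nan-handling reductions exercised above, a quick dynamic-graph check (my own sketch, not patch content):

import paddle

paddle.disable_static()  # run the check in dynamic-graph mode
x = paddle.to_tensor([[1.0, float('nan')], [3.0, 4.0]])
print(paddle.nansum(x).item())           # 8.0: NaN entries count as zero
print(paddle.nansum(x, axis=0).numpy())  # [4., 4.]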
diff --git a/python/paddle/fluid/tests/unittests/test_neg_op.py b/python/paddle/fluid/tests/unittests/test_neg_op.py
index 53f01b94d30..ea748a57aeb 100644
--- a/python/paddle/fluid/tests/unittests/test_neg_op.py
+++ b/python/paddle/fluid/tests/unittests/test_neg_op.py
@@ -36,7 +36,9 @@ class TestNegOp(unittest.TestCase):
         )
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(name='input', shape=[32, 8], dtype=self.dtype)
+        input = paddle.static.data(
+            name='input', shape=[32, 8], dtype=self.dtype
+        )
         result = paddle.neg(input)
         place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_nll_loss.py b/python/paddle/fluid/tests/unittests/test_nll_loss.py
index 2d6d52e5935..e1197aa0a18 100644
--- a/python/paddle/fluid/tests/unittests/test_nll_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_nll_loss.py
@@ -91,8 +91,10 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
             nll_loss = paddle.nn.loss.NLLLoss()
             res = nll_loss(input, label)
@@ -137,8 +139,10 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
             nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
             res = nll_loss(input, label)
@@ -186,9 +190,13 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
-            weight = fluid.data(name='weight', shape=[10], dtype='float64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
+            weight = paddle.static.data(
+                name='weight', shape=[10], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
             res = nll_loss(input, label)
@@ -244,9 +252,13 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
-            weight = fluid.data(name='weight', shape=[10], dtype='float64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
+            weight = paddle.static.data(
+                name='weight', shape=[10], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
             res = nll_loss(input, label)
@@ -287,9 +299,13 @@ class TestNLLLoss(unittest.TestCase):
         startup_prog = fluid.Program()
         place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
-            weight = fluid.data(name='weight', shape=[10], dtype='float64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
+            weight = paddle.static.data(
+                name='weight', shape=[10], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
             res = nll_loss(input, label)
@@ -328,9 +344,13 @@ class TestNLLLoss(unittest.TestCase):
         startup_prog = fluid.Program()
         place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(name='input', shape=[10, 10], dtype='float64')
-            label = fluid.data(name='label', shape=[10], dtype='int64')
-            weight = fluid.data(name='weight', shape=[10], dtype='float64')
+            input = paddle.static.data(
+                name='input', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
+            weight = paddle.static.data(
+                name='weight', shape=[10], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
             res = nll_loss(input, label)
@@ -375,10 +395,12 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5], dtype='int64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss()
             res = nll_loss(input, label)
@@ -416,10 +438,12 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5], dtype='int64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(reduction='sum')
             res = nll_loss(input, label)
@@ -458,11 +482,15 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
             res = nll_loss(input, label)
@@ -503,11 +531,15 @@ class TestNLLLoss(unittest.TestCase):
         startup_prog = fluid.Program()
         place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
             res = nll_loss(input, label)
@@ -552,11 +584,15 @@ class TestNLLLoss(unittest.TestCase):
             else fluid.CPUPlace()
         )
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
             res = nll_loss(input, label)
@@ -603,10 +639,12 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5, 5], dtype='int64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss()
             res = nll_loss(input, label)
@@ -651,11 +689,15 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight)
             res = nll_loss(input, label)
@@ -708,11 +750,15 @@ class TestNLLLoss(unittest.TestCase):
         )
         place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum')
             res = nll_loss(input, label)
@@ -768,11 +814,15 @@ class TestNLLLoss(unittest.TestCase):
         )
         # place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
             res = nll_loss(input, label)
@@ -824,11 +874,15 @@ class TestNLLLoss(unittest.TestCase):
         startup_prog = fluid.Program()
         place = fluid.CPUPlace()
         with fluid.program_guard(prog, startup_prog):
-            input = fluid.data(
+            input = paddle.static.data(
                 name='input', shape=[5, 3, 5, 5, 5], dtype='float64'
             )
-            label = fluid.data(name='label', shape=[5, 5, 5, 5], dtype='int64')
-            weight = fluid.data(name='weight', shape=[3], dtype='float64')
+            label = paddle.static.data(
+                name='label', shape=[5, 5, 5, 5], dtype='int64'
+            )
+            weight = paddle.static.data(
+                name='weight', shape=[3], dtype='float64'
+            )
             nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none')
             res = nll_loss(input, label)
@@ -1081,8 +1135,8 @@ class TestNLLLossName(unittest.TestCase):
         startup_prog = paddle.static.Program()
         place = paddle.CPUPlace()
         with paddle.static.program_guard(prog, startup_prog):
-            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-            label = paddle.fluid.data(name='label', shape=[10], dtype='int64')
+            x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')
+            label = paddle.static.data(name='label', shape=[10], dtype='int64')
             nll_loss = paddle.nn.loss.NLLLoss(name='nll_loss')
             res = nll_loss(x, label)
             self.assertTrue(res.name.startswith('nll_loss'))
@@ -1095,14 +1149,14 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         startup_prog = paddle.static.Program()
         place = paddle.CPUPlace()
         with paddle.static.program_guard(prog, startup_prog):
-            x = paddle.fluid.data(
+            x = paddle.static.data(
                 name='x',
                 shape=[
                     10,
                 ],
                 dtype='float64',
             )
-            label = paddle.fluid.data(
+            label = paddle.static.data(
                 name='label',
                 shape=[
                     10,
@@ -1162,8 +1216,10 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         startup_prog = paddle.static.Program()
         place = paddle.CPUPlace()
         with paddle.static.program_guard(prog, startup_prog):
-            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-            label = paddle.fluid.data(
+            x = paddle.static.data(
+                name='x', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(
                 name='label', shape=[10], dtype='int64'
             )
             nll_loss = paddle.nn.loss.NLLLoss(reduction='')
@@ -1189,8 +1245,10 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         startup_prog = paddle.static.Program()
         place = paddle.CPUPlace()
         with paddle.static.program_guard(prog, startup_prog):
-            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
-            label = paddle.fluid.data(
+            x = paddle.static.data(
+                name='x', shape=[10, 10], dtype='float64'
+            )
+            label = paddle.static.data(
                 name='label', shape=[10], dtype='int64'
             )
             res = paddle.nn.functional.nll_loss(x, label, reduction='')
diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py
index 3b28007d4c8..4b58a5bc8ee 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_all.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -415,7 +415,7 @@ class TestPnormBF16Op(OpTest):
 def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
     with fluid.program_guard(fluid.Program()):
-        data = fluid.data(name="X", shape=shape_x, dtype=dtype)
+        data = paddle.static.data(name="X", shape=shape_x, dtype=dtype)
         out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
@@ -437,7 +437,7 @@ def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
 def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
     with fluid.program_guard(fluid.Program()):
-        data = fluid.data(name="X", shape=shape_x, dtype=dtype)
+        data = paddle.static.data(name="X", shape=shape_x, dtype=dtype)
         out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
@@ -640,7 +640,7 @@ class API_NormTest(unittest.TestCase):
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[10, 10], dtype="float32")
+            x = paddle.static.data(name="x", shape=[10, 10], dtype="float32")
             y_1 = paddle.norm(x, p='fro', name='frobenius_name')
             y_2 = paddle.norm(x, p=2, name='pnorm_name')
             self.assertEqual(('frobenius_name' in y_1.name), True)
@@ -650,24 +650,28 @@ class API_NormTest(unittest.TestCase):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             def err_dtype(p, shape_x, xdtype, out=None):
-                data = fluid.data(shape=shape_x, dtype=xdtype)
+                data = paddle.static.data(shape=shape_x, dtype=xdtype)
                 paddle.norm(data, p=p, out=out)
             self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64")
             self.assertRaises(ValueError, paddle.norm, "inf", [2], "int64")
-            out = fluid.data(name="out", shape=[1], dtype="int64")
+            out = paddle.static.data(name="out", shape=[1], dtype="int64")
             self.assertRaises(
                 TypeError, err_dtype, "fro", [2, 2], "float64", out
             )
             self.assertRaises(TypeError, err_dtype, 2, [10], "int64")
             self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out)
-            data = fluid.data(name="data_2d", shape=[2, 2], dtype="float64")
+            data = paddle.static.data(
+                name="data_2d", shape=[2, 2], dtype="float64"
+            )
             self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm")
             self.assertRaises(ValueError, paddle.norm, data, p=[1])
             self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1)
             self.assertRaises(ValueError, paddle.norm, 0, [1, 0], "float64")
-            data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64")
+            data = paddle.static.data(
+                name="data_3d", shape=[2, 2, 2], dtype="float64"
+            )
             self.assertRaises(
                 ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1]
             )
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index 64a17969dde..2899e16e6f5 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -195,7 +195,7 @@ class API_NormTest(unittest.TestCase):
         with fluid.program_guard(fluid.Program()):
             def test_norm_x_type():
-                data = fluid.data(name="x", shape=[3, 3], dtype="int64")
+                data = paddle.static.data(name="x", shape=[3, 3], dtype="int64")
                 out = paddle.nn.functional.normalize(data)
             self.assertRaises(TypeError, test_norm_x_type)
diff --git a/python/paddle/fluid/tests/unittests/test_normal.py b/python/paddle/fluid/tests/unittests/test_normal.py
index 76e9c7a2f32..6009580a8cd 100644
--- a/python/paddle/fluid/tests/unittests/test_normal.py
+++ b/python/paddle/fluid/tests/unittests/test_normal.py
@@ -66,10 +66,10 @@ class TestNormalAPI(unittest.TestCase):
             self.std, np.ndarray
         ):
             with paddle.static.program_guard(paddle.static.Program()):
-                mean = paddle.fluid.data(
+                mean = paddle.static.data(
                     'Mean', self.mean.shape, self.mean.dtype
                 )
-                std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
+                std = paddle.static.data('Std', self.std.shape, self.std.dtype)
                 out = paddle.normal(mean, std, self.shape)
                 exe = paddle.static.Executor(self.place)
@@ -85,7 +85,7 @@ class TestNormalAPI(unittest.TestCase):
             return ret_all
         elif isinstance(self.mean, np.ndarray):
             with paddle.static.program_guard(paddle.static.Program()):
-                mean = paddle.fluid.data(
+                mean = paddle.static.data(
                     'Mean', self.mean.shape, self.mean.dtype
                 )
                 out = paddle.normal(mean, self.std, self.shape)
@@ -97,7 +97,7 @@ class TestNormalAPI(unittest.TestCase):
             return ret_all
         elif isinstance(self.std, np.ndarray):
             with paddle.static.program_guard(paddle.static.Program()):
-                std = paddle.fluid.data('Std', self.std.shape, self.std.dtype)
+                std = paddle.static.data('Std', self.std.shape, self.std.dtype)
                 out = paddle.normal(self.mean, std, self.shape)
                 exe = paddle.static.Executor(self.place)
@@ -203,17 +203,17 @@ class TestNormalErrors(unittest.TestCase):
             std = [1, 2, 3]
             self.assertRaises(TypeError, paddle.normal, std=std)
-            mean = paddle.fluid.data('Mean', [100], 'int32')
+            mean = paddle.static.data('Mean', [100], 'int32')
             self.assertRaises(TypeError, paddle.normal, mean)
-            std = paddle.fluid.data('Std', [100], 'int32')
+            std = paddle.static.data('Std', [100], 'int32')
             self.assertRaises(TypeError, paddle.normal, mean=1.0, std=std)
             self.assertRaises(TypeError, paddle.normal, shape=1)
             self.assertRaises(TypeError, paddle.normal, shape=[1.0])
-            shape = paddle.fluid.data('Shape', [100], 'float32')
+            shape = paddle.static.data('Shape', [100], 'float32')
             self.assertRaises(TypeError, paddle.normal, shape=shape)
diff --git a/python/paddle/fluid/tests/unittests/test_normalize.py b/python/paddle/fluid/tests/unittests/test_normalize.py
index a986dd90415..2a27b42446d 100644
--- a/python/paddle/fluid/tests/unittests/test_normalize.py
+++ b/python/paddle/fluid/tests/unittests/test_normalize.py
@@ -55,8 +55,8 @@ class TestNNFunctionalNormalize(unittest.TestCase):
         self.assertRaises(BaseException, F.normalize, x)
     def run_static(self, use_gpu=False):
-        x = paddle.fluid.data(name='input', shape=[10, 10], dtype='float32')
-        x2 = paddle.fluid.data(name='input2', shape=[2], dtype='float32')
+        x = paddle.static.data(name='input', shape=[10, 10], dtype='float32')
+        x2 = paddle.static.data(name='input2', shape=[2], dtype='float32')
         result0 = F.normalize(x)
         result1 = F.normalize(x, p=1.5)
         result2 = F.normalize(x, axis=0)
diff --git a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
index ad4aaf1f0e2..a044a315259 100755
--- a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
@@ -130,13 +130,15 @@ class TestNpairLossOpError(unittest.TestCase):
             anchor_np = np.random.random((2, 4)).astype("float32")
             positive_np = np.random.random((2, 4)).astype("float32")
             labels_np = np.random.random((2)).astype("float32")
-            anchor_data = fluid.data(
+            anchor_data = paddle.static.data(
                 name='anchor', shape=[2, 4], dtype='float32'
             )
-            positive_data = fluid.data(
+            positive_data = paddle.static.data(
                 name='positive', shape=[2, 4], dtype='float32'
             )
-            labels_data = fluid.data(name='labels', shape=[2], dtype='float32')
+            labels_data = paddle.static.data(
+                name='labels', shape=[2], dtype='float32'
+            )
             def test_anchor_Variable():
                 # the anchor type must be Variable
@@ -162,7 +164,7 @@ class TestNpairLossOpError(unittest.TestCase):
             def test_anchor_type():
                 # dtype must be float32 or float64
-                anchor_data1 = fluid.data(
+                anchor_data1 = paddle.static.data(
                     name='anchor1', shape=[2, 4], dtype='int32'
                 )
                 paddle.nn.functional.npair_loss(
@@ -171,7 +173,7 @@ class TestNpairLossOpError(unittest.TestCase):
             def test_positive_type():
                 # dtype must be float32 or float64
-                positive_data1 = fluid.data(
+                positive_data1 = paddle.static.data(
                     name='positive1', shape=[2, 4], dtype='int32'
                 )
                 paddle.nn.functional.npair_loss(
@@ -182,7 +184,7 @@ class TestNpairLossOpError(unittest.TestCase):
             def test_labels_type():
                 # dtype must be float32 or float64
-                labels_data1 = fluid.data(
+                labels_data1 = paddle.static.data(
                     name='labels1', shape=[2], dtype='int32'
                 )
                 paddle.nn.functional.npair_loss(
diff --git a/python/paddle/fluid/tests/unittests/test_number_count_op.py b/python/paddle/fluid/tests/unittests/test_number_count_op.py
index c2781b98e00..3e599ca1f0a 100644
--- a/python/paddle/fluid/tests/unittests/test_number_count_op.py
+++ b/python/paddle/fluid/tests/unittests/test_number_count_op.py
@@ -61,7 +61,7 @@ class TestNumberCountAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('x', self.x.shape, dtype="int64")
+            x = paddle.static.data('x', self.x.shape, dtype="int64")
             out = utils._number_count(x, self.upper_num)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'x': self.x}, fetch_list=[out])
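The error-path tests above all share the same shape: declare a placeholder with a deliberately unsupported dtype, then assert the API rejects it. A condensed hypothetical version of that pattern (my own sketch, with made-up names, not patch content):

import unittest
import paddle

class DtypeErrorSketch(unittest.TestCase):
    def test_int32_anchor_rejected(self):
        # npair_loss is documented to accept float32/float64 inputs, so an
        # int32 anchor should raise TypeError at graph-construction time.
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            anchor = paddle.static.data(name='a', shape=[2, 4], dtype='int32')
            positive = paddle.static.data(name='p', shape=[2, 4], dtype='float32')
            labels = paddle.static.data(name='l', shape=[2], dtype='float32')
            self.assertRaises(
                TypeError,
                paddle.nn.functional.npair_loss,
                anchor,
                positive,
                labels,
            )

if __name__ == '__main__':
    unittest.main()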
diff --git a/python/paddle/fluid/tests/unittests/test_numel_op.py b/python/paddle/fluid/tests/unittests/test_numel_op.py
index 8a908831384..3b3ce4eba2f 100644
--- a/python/paddle/fluid/tests/unittests/test_numel_op.py
+++ b/python/paddle/fluid/tests/unittests/test_numel_op.py
@@ -56,8 +56,8 @@ class TestNumelAPI(unittest.TestCase):
         with fluid.program_guard(main_program, startup_program):
             shape1 = [2, 1, 4, 5]
             shape2 = [1, 4, 5]
-            x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1')
-            x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2')
+            x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1')
+            x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2')
             input_1 = np.random.random(shape1).astype("int32")
             input_2 = np.random.random(shape2).astype("int32")
             out_1 = paddle.numel(x_1)
diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py
index e7f06d526c0..f6481a805b3 100644
--- a/python/paddle/fluid/tests/unittests/test_ones_like.py
+++ b/python/paddle/fluid/tests/unittests/test_ones_like.py
@@ -26,7 +26,7 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_
 class TestOnesLikeAPIError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
-            x = paddle.fluid.data('x', [3, 4])
+            x = paddle.static.data('x', [3, 4])
             self.assertRaises(TypeError, ones_like, x, 'int8')
@@ -36,7 +36,7 @@ class TestOnesLikeAPI(unittest.TestCase):
         startup_program = Program()
         train_program = Program()
         with program_guard(train_program, startup_program):
-            x = paddle.fluid.data('X', shape)
+            x = paddle.static.data('X', shape)
             # 'bool', 'float32', 'float64', 'int32', 'int64'
             out1 = ones_like(x)
diff --git a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py
index ec4e98b907d..202a6bc0f6c 100644
--- a/python/paddle/fluid/tests/unittests/test_op_name_conflict.py
+++ b/python/paddle/fluid/tests/unittests/test_op_name_conflict.py
@@ -27,8 +27,8 @@ class TestOpNameConflict(unittest.TestCase):
         startup = fluid.Program()
         with fluid.unique_name.guard():
             with fluid.program_guard(main, startup):
-                x = fluid.data(name="x", shape=[1], dtype='float32')
-                y = fluid.data(name="y", shape=[1], dtype='float32')
+                x = paddle.static.data(name="x", shape=[1], dtype='float32')
+                y = paddle.static.data(name="y", shape=[1], dtype='float32')
                 m = paddle.log2(x, name="log2")
                 n = paddle.log2(y, name="log2")
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
index 2fd87456b69..317b779dd5e 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
@@ -89,14 +89,14 @@ def static(
             opt.minimize(avg_loss)
             return avg_loss
-    image = fluid.data('image', [BATCH_SIZE, INPUT_SIZE], 'float32')
-    label = fluid.data('label', [BATCH_SIZE, 1], 'int64')
+    image = paddle.static.data('image', [BATCH_SIZE, INPUT_SIZE], 'float32')
+    label = paddle.static.data('label', [BATCH_SIZE, 1], 'int64')
     hidden, prediction = double_fc_net(image)
     adam = optimizer.Adam(learning_rate=LR)
     sgd = optimizer.SGD(learning_rate=LR)
-    id = fluid.data('id', [1], 'int32')
+    id = paddle.static.data('id', [1], 'int32')
     two = paddle.tensor.fill_constant([1], 'int32', 2)
     mod_two = paddle.remainder(id, two) == 0
diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
index cd93f48b7eb..815ae1d94a9 100644
--- a/python/paddle/fluid/tests/unittests/test_pad3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py
@@ -199,7 +199,7 @@ class TestPadAPI(unittest.TestCase):
         mode = "constant"
         value = 100
         input_data = np.random.rand(*input_shape).astype(np.float32)
-        x = paddle.fluid.data(name="x", shape=input_shape)
+        x = paddle.static.data(name="x", shape=input_shape)
         result = F.pad(
             x=x, pad=pad, value=value, mode=mode, data_format="NCDHW"
         )
@@ -220,7 +220,7 @@ class TestPadAPI(unittest.TestCase):
         pad = [1, 2, 1, 1, 1, 2]
         mode = "reflect"
         input_data = np.random.rand(*input_shape).astype(np.float32)
-        x = paddle.fluid.data(name="x", shape=input_shape)
+        x = paddle.static.data(name="x", shape=input_shape)
         result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
         result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
         exe = Executor(place)
@@ -246,7 +246,7 @@ class TestPadAPI(unittest.TestCase):
         pad = [1, 2, 1, 1, 3, 4]
         mode = "replicate"
         input_data = np.random.rand(*input_shape).astype(np.float32)
-        x = paddle.fluid.data(name="x", shape=input_shape)
+        x = paddle.static.data(name="x", shape=input_shape)
         result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
         result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
         exe = Executor(place)
@@ -272,7 +272,7 @@ class TestPadAPI(unittest.TestCase):
         pad = [1, 2, 1, 1, 3, 4]
         mode = "circular"
         input_data = np.random.rand(*input_shape).astype(np.float32)
-        x = paddle.fluid.data(name="x", shape=input_shape)
+        x = paddle.static.data(name="x", shape=input_shape)
         result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
         result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
         exe = Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_pad_op.py b/python/paddle/fluid/tests/unittests/test_pad_op.py
index 93fb376ee70..effc6cc70a6 100644
--- a/python/paddle/fluid/tests/unittests/test_pad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad_op.py
@@ -20,7 +20,6 @@ from eager_op_test import OpTest
 from test_attribute_var import UnittestBase
 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
@@ -121,7 +120,7 @@ class TestPadOpError(unittest.TestCase):
         self.assertRaises(TypeError, test_Variable)
-        data = fluid.data(name='data', shape=[4], dtype='float16')
+        data = paddle.static.data(name='data', shape=[4], dtype='float16')
         paddle.nn.functional.pad(x=data, pad=[0, 1])
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py b/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py
index 1dfd7f2f858..d40f91dea2b 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_fluid_modelaverage.py
@@ -31,7 +31,9 @@ class TestModelAverage(unittest.TestCase):
         test_program = fluid.Program()
         with fluid.program_guard(train_program, startup):
             with fluid.unique_name.guard():
-                data = fluid.data(name='X', shape=[None, 1], dtype='float32')
+                data = paddle.static.data(
+                    name='X', shape=[None, 1], dtype='float32'
+                )
                 hidden = paddle.static.nn.fc(x=data, size=10)
                 loss = paddle.mean(hidden)
         test_program = train_program.clone()
diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py
index 70fe5cc8d8a..099c191004c 100644
--- a/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py
+++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load_binary.py
@@ -141,7 +141,9 @@ class TestSaveLoadBinaryFormat(unittest.TestCase):
         paddle.enable_static()
         OUTPUT_NUM = 32
         with new_program_scope():
-            x = fluid.data(name="x", shape=[None, IMAGE_SIZE], dtype='float32')
+            x = paddle.static.data(
+                name="x", shape=[None, IMAGE_SIZE], dtype='float32'
+            )
             y = paddle.static.nn.fc(
                 x,
                 OUTPUT_NUM,
diff --git a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
index 1c3b7d261a5..56ac004cf1c 100644
--- a/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
+++ b/python/paddle/fluid/tests/unittests/test_pairwise_distance.py
@@ -58,8 +58,8 @@ def test_static(
     )
     paddle.enable_static()
     with paddle.static.program_guard(prog, startup_prog):
-        x = paddle.fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
-        y = paddle.fluid.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
+        x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
+        y = paddle.static.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
         if functional:
             distance = call_pairwise_distance_functional(
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py
index 3543cce6ad0..570ed7aee3f 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py
@@ -113,10 +113,10 @@ class TestPixelShuffleAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
         paddle.enable_static()
-        x_1 = paddle.fluid.data(
+        x_1 = paddle.static.data(
             name="x", shape=[2, 9, 4, 4], dtype="float64"
         )
-        x_2 = paddle.fluid.data(
+        x_2 = paddle.static.data(
             name="x2", shape=[2, 4, 4, 9], dtype="float64"
         )
         out_1 = F.pixel_shuffle(x_1, 3)
@@ -149,10 +149,10 @@ class TestPixelShuffleAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0)
         self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float16")
         self.x_2_np = np.random.random([2, 4, 4, 9]).astype("float16")
-        x_1 = paddle.fluid.data(
+        x_1 = paddle.static.data(
             name="x", shape=[2, 9, 4, 4], dtype="float16"
         )
-        x_2 = paddle.fluid.data(
+        x_2 = paddle.static.data(
             name="x2", shape=[2, 4, 4, 9], dtype="float16"
         )
         # init instance
@@ -186,10 +186,10 @@ class TestPixelShuffleAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
         paddle.enable_static()
-        x_1 = paddle.fluid.data(
+        x_1 = paddle.static.data(
             name="x", shape=[2, 9, 4, 4], dtype="float64"
         )
-        x_2 = paddle.fluid.data(
+        x_2 = paddle.static.data(
             name="x2", shape=[2, 4, 4, 9], dtype="float64"
         )
         # init instance
diff --git a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py
index 2aa064c2dc8..e9bb76ea2b9 100644
--- a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py
+++ b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py
@@ -140,10 +140,10 @@ class TestPixelUnshuffleAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
         paddle.enable_static()
-        x_1 = paddle.fluid.data(
+        x_1 = paddle.static.data(
             name="x", shape=[2, 1, 12, 12], dtype="float64"
         )
-        x_2 = paddle.fluid.data(
+        x_2 = paddle.static.data(
             name="x2", shape=[2, 12, 12, 1], dtype="float64"
         )
         out_1 = F.pixel_unshuffle(x_1, 3)
@@ -177,10 +177,10 @@ class TestPixelUnshuffleAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
         paddle.enable_static()
-        x_1 = paddle.fluid.data(
+        x_1 = paddle.static.data(
            name="x", shape=[2, 1, 12, 12], dtype="float64"
         )
-        x_2 = paddle.fluid.data(
+        x_2 = paddle.static.data(
             name="x2", shape=[2, 12, 12, 1], dtype="float64"
         )
         # init instance
diff --git a/python/paddle/fluid/tests/unittests/test_pool1d_api.py b/python/paddle/fluid/tests/unittests/test_pool1d_api.py
index 212a896d40e..b654f70c38d 100644
--- a/python/paddle/fluid/tests/unittests/test_pool1d_api.py
+++ b/python/paddle/fluid/tests/unittests/test_pool1d_api.py
@@ -123,7 +123,9 @@ class TestPool1D_API(unittest.TestCase):
     def check_avg_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
+            input = paddle.static.data(
+                name="input", shape=[2, 3, 32], dtype="float32"
+            )
             result = F.avg_pool1d(input, kernel_size=2, stride=2, padding=0)
             input_np = np.random.random([2, 3, 32]).astype("float32")
@@ -206,7 +208,9 @@ class TestPool1D_API(unittest.TestCase):
     def check_max_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
+            input = paddle.static.data(
+                name="input", shape=[2, 3, 32], dtype="float32"
+            )
             result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0])
             input_np = np.random.random([2, 3, 32]).astype("float32")
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_api.py b/python/paddle/fluid/tests/unittests/test_pool2d_api.py
index fcdec610a48..dd6ae87f64d 100644
--- a/python/paddle/fluid/tests/unittests/test_pool2d_api.py
+++ b/python/paddle/fluid/tests/unittests/test_pool2d_api.py
@@ -36,7 +36,7 @@ class TestPool2D_API(unittest.TestCase):
     def check_avg_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input", shape=[2, 3, 32, 32], dtype="float32"
             )
             result = avg_pool2d(input, kernel_size=2, stride=2, padding=0)
@@ -128,7 +128,7 @@ class TestPool2D_API(unittest.TestCase):
     def check_max_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input", shape=[2, 3, 32, 32], dtype="float32"
             )
             result = max_pool2d(input, kernel_size=2, stride=2, padding=0)
diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_api.py b/python/paddle/fluid/tests/unittests/test_pool3d_api.py
index 2c069b9e844..d46e9f76c8f 100644
--- a/python/paddle/fluid/tests/unittests/test_pool3d_api.py
+++ b/python/paddle/fluid/tests/unittests/test_pool3d_api.py
@@ -36,7 +36,7 @@ class TestPool3D_API(unittest.TestCase):
     def check_avg_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input", shape=[2, 3, 32, 32, 32], dtype="float32"
             )
             result = avg_pool3d(input, kernel_size=2, stride=2, padding=0)
@@ -141,7 +141,7 @@ class TestPool3D_API(unittest.TestCase):
     def check_max_static_results(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(
+            input = paddle.static.data(
                 name="input", shape=[2, 3, 32, 32, 32], dtype="float32"
             )
             result = max_pool3d(input, kernel_size=2, stride=2, padding=0)
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index 4a4d5921bbb..c9d1a21fdad 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -51,8 +51,8 @@ class TestFunctionalPReluAPI(unittest.TestCase):
     def static_check(self, weight_np):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.x_np.shape, 'float32')
-            weight = paddle.fluid.data('Alpha', weight_np.shape, 'float32')
+            x = paddle.static.data('X', self.x_np.shape, 'float32')
+            weight = paddle.static.data('Alpha', weight_np.shape, 'float32')
             out = F.prelu(x, weight)
             exe = paddle.static.Executor(self.place)
             res = exe.run(
@@ -80,18 +80,18 @@ class TestFunctionalPReluAPI(unittest.TestCase):
     def test_error(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            weight_fp32 = paddle.fluid.data(
+            weight_fp32 = paddle.static.data(
                 name='weight_fp32', shape=[1], dtype='float32'
             )
             # The input type must be Variable.
             self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
             # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
+            x_int32 = paddle.static.data(
                 name='x_int32', shape=[2, 3], dtype='int32'
             )
             self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
             # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
+            x_fp16 = paddle.static.data(
                 name='x_fp16', shape=[2, 3], dtype='float16'
             )
             F.prelu(x=x_fp16, weight=weight_fp32)
@@ -110,7 +110,7 @@ class TestNNPReluAPI(unittest.TestCase):
         startup_program = paddle.static.Program()
         train_program = paddle.static.Program()
         with paddle.static.program_guard(train_program, startup_program):
-            x = paddle.fluid.data(
+            x = paddle.static.data(
                 name='X', shape=self.x_np.shape, dtype='float32'
             )
             m = paddle.nn.PReLU()
@@ -463,7 +463,7 @@ class TestModeError(unittest.TestCase):
     def test_mode_error(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = prelu_t(x, 'any')
             except Exception as e:
@@ -472,7 +472,7 @@ class TestModeError(unittest.TestCase):
     def test_data_format_error1(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = prelu_t(x, 'channel', data_format='N')
             except Exception as e:
@@ -481,7 +481,7 @@ class TestModeError(unittest.TestCase):
     def test_data_format_error2(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = paddle.static.nn.prelu(x, 'channel', data_format='N')
             except ValueError as e:
diff --git a/python/paddle/fluid/tests/unittests/test_prod_op.py b/python/paddle/fluid/tests/unittests/test_prod_op.py
index 47b41aafc32..2146655baf5 100644
--- a/python/paddle/fluid/tests/unittests/test_prod_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prod_op.py
@@ -71,7 +71,7 @@ class TestProdOp(unittest.TestCase):
         )
     def run_static(self, use_gpu=False):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
         result0 = paddle.prod(input)
@@ -154,8 +154,8 @@ class TestProdOpError(unittest.TestCase):
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
-            x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
-            bool_x = paddle.fluid.data(
+            x = paddle.static.data(name='x', shape=[2, 2, 4], dtype='float32')
+            bool_x = paddle.static.data(
                 name='bool_x', shape=[2, 2, 4], dtype='bool'
             )
             # The argument x shoule be a Tensor
diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py
index dfc9b7572da..c9de1f95e65 100644
--- a/python/paddle/fluid/tests/unittests/test_program.py
+++ b/python/paddle/fluid/tests/unittests/test_program.py
@@ -106,7 +106,7 @@ class TestProgram(unittest.TestCase):
     def test_program_all_parameters(self):
         program = fluid.default_main_program()
-        data = fluid.data(name='x', shape=[None, 13], dtype='float32')
+        data = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
         hidden = paddle.static.nn.fc(x=data, size=10)
         loss = paddle.mean(hidden)
         fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
diff --git a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py
index 7470dae1846..75be4531820 100644
--- a/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py
+++ b/python/paddle/fluid/tests/unittests/test_put_along_axis_op.py
@@ -86,9 +86,9 @@ class TestPutAlongAxisAPI(unittest.TestCase):
         def run(place):
             with paddle.static.program_guard(paddle.static.Program()):
-                x = paddle.fluid.data('X', self.shape)
-                index = paddle.fluid.data('Index', self.index_shape, "int64")
-                value = paddle.fluid.data('Value', self.value_shape)
+                x = paddle.static.data('X', self.shape)
+                index = paddle.static.data('Index', self.index_shape, "int64")
+                value = paddle.static.data('Value', self.value_shape)
                 out = paddle.put_along_axis(x, index, value, self.axis)
                 exe = paddle.static.Executor(self.place[0])
                 res = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py
index 6af32d58705..3ddf5cdde23 100644
--- a/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pyramid_hash_op.py
@@ -16,6 +16,7 @@ import unittest
 import numpy as np
+import paddle
 import paddle.fluid as fluid
@@ -24,7 +25,9 @@ class TestPyramidHashOpApi(unittest.TestCase):
         num_voc = 128
         embed_dim = 64
         x_shape, x_lod = [16, 10], [[3, 5, 2, 6]]
-        x = fluid.data(name='x', shape=x_shape, dtype='int32', lod_level=1)
+        x = paddle.static.data(
+            name='x', shape=x_shape, dtype='int32', lod_level=1
+        )
         hash_embd = fluid.contrib.search_pyramid_hash(
             input=x,
             num_emb=embed_dim,
diff --git a/python/paddle/fluid/tests/unittests/test_qr_op.py b/python/paddle/fluid/tests/unittests/test_qr_op.py
index 44dbeb902b9..c459a727bcf 100644
--- a/python/paddle/fluid/tests/unittests/test_qr_op.py
+++ b/python/paddle/fluid/tests/unittests/test_qr_op.py
@@ -225,7 +225,7 @@ class TestQrAPI(unittest.TestCase):
                 tmp_q, tmp_r = np.linalg.qr(a[coord], mode=mode)
                 np_q[coord] = tmp_q
                 np_r[coord] = tmp_r
-            x = paddle.fluid.data(
+            x = paddle.static.data(
                 name="input", shape=shape, dtype=dtype
             )
             if mode == "r":
diff --git a/python/paddle/fluid/tests/unittests/test_rad2deg.py b/python/paddle/fluid/tests/unittests/test_rad2deg.py
index 7332a113b87..d37e8987161 100644
--- a/python/paddle/fluid/tests/unittests/test_rad2deg.py
+++ b/python/paddle/fluid/tests/unittests/test_rad2deg.py
@@ -36,7 +36,9 @@ class TestRad2degAPI(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(startup_program, train_program):
-            x = fluid.data(name='input', dtype=self.x_dtype, shape=self.x_shape)
+            x = paddle.static.data(
+                name='input', dtype=self.x_dtype, shape=self.x_shape
+            )
             out = paddle.rad2deg(x)
             place = (
diff --git a/python/paddle/fluid/tests/unittests/test_rand_op.py b/python/paddle/fluid/tests/unittests/test_rand_op.py
index d259f2c9163..94dc929f151 100644
--- a/python/paddle/fluid/tests/unittests/test_rand_op.py
+++ b/python/paddle/fluid/tests/unittests/test_rand_op.py
@@ -68,10 +68,12 @@ class TestRandOp(unittest.TestCase):
         dim_2 = paddle.tensor.fill_constant([1], "int32", 5)
         result_2 = rand(shape=[dim_1, dim_2])
-        var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
+        var_shape = paddle.static.data(
+            name='var_shape', shape=[2], dtype="int64"
+        )
         result_3 = rand(var_shape)
-        var_shape_int32 = fluid.data(
+        var_shape_int32 = paddle.static.data(
             name='var_shape_int32', shape=[2], dtype="int32"
         )
         result_4 = rand(var_shape_int32)
diff --git a/python/paddle/fluid/tests/unittests/test_randint_like.py b/python/paddle/fluid/tests/unittests/test_randint_like.py
index 76e7b204be4..fdfac01b8bd 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_like.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_like.py
@@ -41,7 +41,7 @@ class TestRandintLikeAPI(unittest.TestCase):
         paddle.enable_static()
         with program_guard(Program(), Program()):
             # results are from [-100, 100).
-            x_bool = paddle.fluid.data(
+            x_bool = paddle.static.data(
                 name="x_bool", shape=[10, 12], dtype="bool"
             )
             exe = paddle.static.Executor(self.place)
@@ -55,7 +55,7 @@ class TestRandintLikeAPI(unittest.TestCase):
             self.assertTrue(out.dtype, np.dtype(dtype))
             self.assertTrue(((out >= -10) & (out <= 10)).all(), True)
         with program_guard(Program(), Program()):
-            x_int32 = paddle.fluid.data(
+            x_int32 = paddle.static.data(
                 name="x_int32", shape=[10, 12], dtype="int32"
             )
             exe = paddle.static.Executor(self.place)
@@ -70,7 +70,7 @@ class TestRandintLikeAPI(unittest.TestCase):
             self.assertTrue(((out >= -5) & (out <= 10)).all(), True)
         with program_guard(Program(), Program()):
-            x_int64 = paddle.fluid.data(
+            x_int64 = paddle.static.data(
                 name="x_int64", shape=[10, 12], dtype="int64"
             )
             exe = paddle.static.Executor(self.place)
@@ -85,7 +85,7 @@ class TestRandintLikeAPI(unittest.TestCase):
             self.assertTrue(((out >= -100) & (out <= 100)).all(), True)
         if paddle.is_compiled_with_cuda():
             with program_guard(Program(), Program()):
-                x_float16 = paddle.fluid.data(
+                x_float16 = paddle.static.data(
                     name="x_float16", shape=[10, 12], dtype="float16"
                 )
                 exe = paddle.static.Executor(self.place)
@@ -102,7 +102,7 @@ class TestRandintLikeAPI(unittest.TestCase):
                 self.assertTrue(((out >= -3) & (out <= 25)).all(), True)
         with program_guard(Program(), Program()):
-            x_float32 = paddle.fluid.data(
+            x_float32 = paddle.static.data(
                 name="x_float32", shape=[10, 12], dtype="float32"
             )
             exe = paddle.static.Executor(self.place)
@@ -119,7 +119,7 @@ class TestRandintLikeAPI(unittest.TestCase):
             self.assertTrue(((out >= -25) & (out <= 25)).all(), True)
         with program_guard(Program(), Program()):
-            x_float64 = paddle.fluid.data(
+            x_float64 = paddle.static.data(
                 name="x_float64", shape=[10, 12], dtype="float64"
             )
             exe = paddle.static.Executor(self.place)
@@ -172,22 +172,22 @@ class TestRandintLikeAPI(unittest.TestCase):
     def test_errors(self):
         paddle.enable_static()
         with program_guard(Program(), Program()):
-            x_bool = paddle.fluid.data(
+            x_bool = paddle.static.data(
                 name="x_bool", shape=[10, 12], dtype="bool"
             )
-            x_int32 = paddle.fluid.data(
+            x_int32 = paddle.static.data(
                 name="x_int32", shape=[10, 12], dtype="int32"
             )
-            x_int64 = paddle.fluid.data(
+            x_int64 = paddle.static.data(
                 name="x_int64", shape=[10, 12], dtype="int64"
             )
-            x_float16 = paddle.fluid.data(
+            x_float16 = paddle.static.data(
                 name="x_float16", shape=[10, 12], dtype="float16"
             )
-            x_float32 = paddle.fluid.data(
+            x_float32 = paddle.static.data(
                 name="x_float32", shape=[10, 12], dtype="float32"
             )
-            x_float64 = paddle.fluid.data(
+            x_float64 = paddle.static.data(
                 name="x_float64", shape=[10, 12], dtype="float64"
             )
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index f17f6112e8d..3798d8c818d 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -1081,7 +1081,7 @@ class API_TestSumOp(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         for place in places:
             with fluid.program_guard(fluid.Program(), fluid.Program()):
-                data = fluid.data("data", shape=shape, dtype=x_dtype)
+                data = paddle.static.data("data", shape=shape, dtype=x_dtype)
                 result_sum = paddle.sum(
                     x=data, axis=attr_axis, dtype=attr_dtype
                 )
@@ -1156,7 +1156,7 @@ class TestAllAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
+            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
             result = paddle.all(x=input)
             input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
@@ -1213,7 +1213,7 @@ class TestAnyAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input = fluid.data(name="input", shape=[4, 4], dtype="bool")
+            input = paddle.static.data(name="input", shape=[4, 4], dtype="bool")
             result = paddle.any(x=input)
             input_np = np.random.randint(0, 2, [4, 4]).astype("bool")
diff --git a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py
index 4534eac042a..9bd6752b7a1 100644
--- a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py
+++ b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py
@@ -37,7 +37,7 @@ class TestRnnError(unittest.TestCase):
             input_size = 16
             hidden_size = 16
             seq_len = 4
-            inputs = fluid.data(
+            inputs = paddle.static.data(
                 name='inputs', shape=[None, input_size], dtype='float32'
             )
             pre_hidden = paddle.static.data(
@@ -45,12 +45,12 @@ class TestRnnError(unittest.TestCase):
                 shape=[None, hidden_size],
                 dtype='float32',
             )
-            inputs_basic_lstm = fluid.data(
+            inputs_basic_lstm = paddle.static.data(
                 name='inputs_basic_lstm',
                 shape=[None, None, input_size],
                 dtype='float32',
             )
-            sequence_length = fluid.data(
+            sequence_length = paddle.static.data(
                 name="sequence_length", shape=[None], dtype='int64'
             )
@@ -161,18 +161,18 @@ class TestRnn(unittest.TestCase):
             setattr(numpy_cell, k, param)
             fluid.global_scope().find_var(v.name).get_tensor().set(param, place)
-        sequence_length = fluid.data(
+        sequence_length = paddle.static.data(
            name="sequence_length", shape=[None], dtype='int64'
         )
-        inputs_rnn = fluid.data(
+        inputs_rnn = paddle.static.data(
             name='inputs_rnn',
             shape=[None, None, self.input_size],
             dtype='float64',
         )
-        pre_hidden = fluid.data(
+        pre_hidden = paddle.static.data(
             name='pre_hidden', shape=[None, self.hidden_size], dtype='float64'
         )
-        pre_cell = fluid.data(
+        pre_cell = paddle.static.data(
             name='pre_cell', shape=[None, self.hidden_size], dtype='float64'
         )
a/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_decode_api.py @@ -186,16 +186,20 @@ class SeqPGAgent: def build_program(self, model_cls, alg_cls, model_hparams, alg_hparams): with fluid.program_guard(self.main_program, self.startup_program): - source = fluid.data(name="src", shape=[None, None], dtype="int64") - source_length = fluid.data( + source = paddle.static.data( + name="src", shape=[None, None], dtype="int64" + ) + source_length = paddle.static.data( name="src_sequence_length", shape=[None], dtype="int64" ) # only for teacher-forcing MLE training - target = fluid.data(name="trg", shape=[None, None], dtype="int64") - target_length = fluid.data( + target = paddle.static.data( + name="trg", shape=[None, None], dtype="int64" + ) + target_length = paddle.static.data( name="trg_sequence_length", shape=[None], dtype="int64" ) - label = fluid.data( + label = paddle.static.data( name="label", shape=[None, None, 1], dtype="int64" ) self.model = model_cls(**model_hparams) @@ -204,7 +208,7 @@ class SeqPGAgent: source, source_length, target, target_length ) self.samples.stop_gradient = True - self.reward = fluid.data( + self.reward = paddle.static.data( name="reward", shape=[None, None], # batch_size, seq_len dtype=self.probs.dtype, diff --git a/python/paddle/fluid/tests/unittests/test_rot90_op.py b/python/paddle/fluid/tests/unittests/test_rot90_op.py index a6b249ab190..73e59e1118d 100644 --- a/python/paddle/fluid/tests/unittests/test_rot90_op.py +++ b/python/paddle/fluid/tests/unittests/test_rot90_op.py @@ -28,7 +28,9 @@ class TestRot90_API(unittest.TestCase): startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 1]) output = paddle.rot90(output, k=1, axes=[0, 1]) output = output.rot90(k=1, axes=[0, 1]) @@ -53,11 +55,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_0(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=0, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -80,11 +84,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_2(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=2, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -107,11 +113,13 @@ class TestRot90_API(unittest.TestCase): def test_static_k_3(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', 
shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=3, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -134,11 +142,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_1(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-1, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -161,11 +171,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_2(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-2, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -188,11 +200,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_3(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-3, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -215,11 +229,13 @@ class TestRot90_API(unittest.TestCase): def test_static_neg_k_4(self): paddle.enable_static() - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) startup_program = fluid.Program() train_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=-4, axes=[0, 1]) place = fluid.CPUPlace() if fluid.core.is_compiled_with_cuda(): @@ -245,32 +261,40 @@ class TestRot90_API(unittest.TestCase): # dims error def run1(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0]) self.assertRaises(ValueError, run1) # input dims error def run2(): - input = fluid.data(name='input', dtype='float32', shape=[2]) + input = paddle.static.data(name='input', dtype='float32', shape=[2]) output = paddle.rot90(input, k=1, axes=[0, 1]) self.assertRaises(ValueError, run2) def run3(): - input = fluid.data(name='input', 
dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 0]) self.assertRaises(ValueError, run3) def run4(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[3, 1]) self.assertRaises(ValueError, run4) def run5(): - input = fluid.data(name='input', dtype='float32', shape=[2, 3]) + input = paddle.static.data( + name='input', dtype='float32', shape=[2, 3] + ) output = paddle.rot90(input, k=1, axes=[0, 3]) self.assertRaises(ValueError, run5) diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py index 408a5f8a740..0d27282aa3b 100644 --- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py @@ -193,7 +193,7 @@ class TestRowConvLayer(unittest.TestCase): main = fluid.Program() with fluid.unique_name.guard(): with fluid.program_guard(main, start): - x = fluid.data("x", (-1, -1, self.C), "float32") + x = paddle.static.data("x", (-1, -1, self.C), "float32") out = paddle.static.nn.row_conv( x, self.context_length, diff --git a/python/paddle/fluid/tests/unittests/test_rrelu_op.py b/python/paddle/fluid/tests/unittests/test_rrelu_op.py index c7523a5f9b3..7fa2c62f602 100644 --- a/python/paddle/fluid/tests/unittests/test_rrelu_op.py +++ b/python/paddle/fluid/tests/unittests/test_rrelu_op.py @@ -59,7 +59,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data( + input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) res1 = F.rrelu( @@ -97,10 +97,10 @@ class TestFunctionalRReluAPI(unittest.TestCase): for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=self.x_np.shape, dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=self.x_np.shape, dtype="float64" ) out_1 = F.rrelu(x_1, self.lower_0, self.upper_0, training=False) @@ -140,10 +140,10 @@ class TestFunctionalRReluAPI(unittest.TestCase): for place in self.places: paddle.enable_static() - x_1 = paddle.fluid.data( + x_1 = paddle.static.data( name="x", shape=self.x_np.shape, dtype="float64" ) - x_2 = paddle.fluid.data( + x_2 = paddle.static.data( name="x2", shape=self.x_np.shape, dtype="float64" ) # init instance @@ -223,7 +223,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): TypeError, F.rrelu, x=1, lower=self.lower_0, upper=self.upper_0 ) # The input dtype must be float16, float32, float64. 
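Before the dtype hunks that follow: a minimal sketch of the assertRaises idiom these test_errors methods rely on, with a hypothetical test-class name and arbitrary lower/upper values (0.1/0.3). It assumes F.rrelu keeps rejecting non-float inputs with TypeError, which is exactly what the rewritten lines below encode:

import unittest

import paddle
import paddle.nn.functional as F


class RReluDtypeCheck(unittest.TestCase):  # hypothetical name, for illustration
    def test_int32_rejected(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # Declaring the placeholder succeeds; the op itself rejects int32.
            x_int32 = paddle.static.data(
                name='x_int32', shape=[2, 3], dtype='int32'
            )
            self.assertRaises(
                TypeError, F.rrelu, x=x_int32, lower=0.1, upper=0.3
            )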
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises( @@ -233,7 +233,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): lower=self.lower_0, upper=self.upper_0, ) - x_bool = paddle.fluid.data( + x_bool = paddle.static.data( name='x_bool', shape=[2, 3], dtype='int32' ) self.assertRaises( @@ -244,7 +244,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): upper=self.upper_0, ) # lower and upper must be float - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[2, 3], dtype='float32' ) self.assertRaises(TypeError, F.rrelu, x=x_fp32, lower=0, upper=0.5) @@ -261,7 +261,7 @@ class TestFunctionalRReluAPI(unittest.TestCase): ValueError, F.rrelu, x=x_fp32, lower=0.5, upper=0.2 ) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) F.rrelu(x=x_fp16, lower=self.lower_0, upper=self.upper_0) diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py index 0374df968af..13d14c56f56 100644 --- a/python/paddle/fluid/tests/unittests/test_run_program_op.py +++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py @@ -394,7 +394,7 @@ class TestRunProgramOpWithFC(RunProgramOpTest): def build_model(self): # 1. simple model - img = fluid.data( + img = paddle.static.data( name=self.input_names['X'][0], shape=[None, 1, 28, 28], dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py index 2b0e28adf8f..3afa270f4dc 100644 --- a/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py +++ b/python/paddle/fluid/tests/unittests/test_scaled_dot_product_attention.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -23,11 +24,13 @@ from paddle.fluid import Program, program_guard class TestScaledDotProductAttentionError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - queries = fluid.data( + queries = paddle.static.data( name="queries", shape=[3, 5, 9], dtype="float32" ) - keys = fluid.data(name="keys", shape=[3, 6, 9], dtype="float32") - values = fluid.data( + keys = paddle.static.data( + name="keys", shape=[3, 6, 9], dtype="float32" + ) + values = paddle.static.data( name="values", shape=[3, 6, 10], dtype="float32" ) @@ -56,10 +59,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(TypeError, test_values_Variable) def test_diff_dtype(): - keys_error = fluid.data( + keys_error = paddle.static.data( name="keys_error", shape=[3, 6, 9], dtype="float64" ) - values_error = fluid.data( + values_error = paddle.static.data( name="values_error", shape=[3, 6, 10], dtype="float64" ) fluid.nets.scaled_dot_product_attention( @@ -69,10 +72,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(TypeError, test_diff_dtype) def test_diff_dim(): - keys_error_dim = fluid.data( + keys_error_dim = paddle.static.data( name="keys_error_dim", shape=[3, 6], dtype="float32" ) - values_error_dim = fluid.data( + values_error_dim = paddle.static.data( name="values_error_dim", shape=[3], dtype="float32" ) fluid.nets.scaled_dot_product_attention( @@ -82,10 +85,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): 
self.assertRaises(ValueError, test_diff_dim) def test_diff_hidden_size(): - queries_error_hs = fluid.data( + queries_error_hs = paddle.static.data( name="queries_error_hs", shape=[3, 5, 9], dtype="float32" ) - keys_error_hs = fluid.data( + keys_error_hs = paddle.static.data( name="keys_error_hs", shape=[3, 6, 10], dtype="float32" ) fluid.nets.scaled_dot_product_attention( @@ -95,10 +98,10 @@ class TestScaledDotProductAttentionError(unittest.TestCase): self.assertRaises(ValueError, test_diff_hidden_size) def test_diff_max_len(): - keys_error_len = fluid.data( + keys_error_len = paddle.static.data( name="keys_error_len", shape=[3, 7, 9], dtype="float32" ) - values_error_len = fluid.data( + values_error_len = paddle.static.data( name="values_error_len", shape=[3, 6, 10], dtype="float32" ) fluid.nets.scaled_dot_product_attention( diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py index ec810280432..14afd56dec7 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py @@ -224,9 +224,13 @@ class TestScatterAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - input = fluid.data(name="input", shape=[3, 2], dtype="float64") - index = fluid.data(name="index", shape=[4], dtype="int64") - updates = fluid.data(name="updates", shape=[4, 2], dtype="float64") + input = paddle.static.data( + name="input", shape=[3, 2], dtype="float64" + ) + index = paddle.static.data(name="index", shape=[4], dtype="int64") + updates = paddle.static.data( + name="updates", shape=[4, 2], dtype="float64" + ) result = self.scatter(input, index, updates, False) input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64) diff --git a/python/paddle/fluid/tests/unittests/test_selu_op.py b/python/paddle/fluid/tests/unittests/test_selu_op.py index 1cd638b3783..e3ec3370905 100644 --- a/python/paddle/fluid/tests/unittests/test_selu_op.py +++ b/python/paddle/fluid/tests/unittests/test_selu_op.py @@ -96,7 +96,7 @@ class TestSeluAPI(unittest.TestCase): def test_static_api(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.selu(x, self.scale, self.alpha) selu = paddle.nn.SELU(self.scale, self.alpha) out2 = selu(x) @@ -119,7 +119,7 @@ class TestSeluAPI(unittest.TestCase): def test_fluid_api(self): with fluid.program_guard(fluid.Program()): - x = fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = F.selu(x, self.scale, self.alpha) exe = fluid.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -131,19 +131,19 @@ class TestSeluAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, F.selu, 1) # The input dtype must be float16, float32, float64. 
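The selu hunks here keep the feed/fetch run pattern intact while swapping the data declaration. A self-contained sketch of that pattern under the new API, with shape [12, 10] borrowed from the error cases below and a CPU place assumed:

import numpy as np

import paddle
import paddle.nn.functional as F

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # Positional form also works: name, shape, dtype.
    x = paddle.static.data('X', [12, 10], 'float32')
    out = F.selu(x)
    exe = paddle.static.Executor(paddle.CPUPlace())
    (res,) = exe.run(
        feed={'X': np.random.rand(12, 10).astype('float32')},
        fetch_list=[out],
    )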
- x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[12, 10], dtype='int32' ) self.assertRaises(TypeError, F.selu, x_int32) # The scale must be greater than 1.0 - x_fp32 = paddle.fluid.data( + x_fp32 = paddle.static.data( name='x_fp32', shape=[12, 10], dtype='float32' ) self.assertRaises(ValueError, F.selu, x_fp32, -1.0) # The alpha must be no less than 0 self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[12, 10], dtype='float16' ) F.selu(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py index 594de6859ce..23814c824b7 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_focal_loss.py @@ -42,17 +42,17 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() with paddle.static.program_guard(prog, startup_prog): - logit = paddle.fluid.data( + logit = paddle.static.data( name='logit', shape=logit_np.shape, dtype='float64' ) - label = paddle.fluid.data( + label = paddle.static.data( name='label', shape=label_np.shape, dtype='float64' ) feed_dict = {"logit": logit_np, "label": label_np} normalizer = None if normalizer_np is not None: - normalizer = paddle.fluid.data( + normalizer = paddle.static.data( name='normalizer', shape=normalizer_np.shape, dtype='float64' ) feed_dict["normalizer"] = normalizer_np diff --git a/python/paddle/fluid/tests/unittests/test_size_op.py b/python/paddle/fluid/tests/unittests/test_size_op.py index edef25ed7a7..0a23e7359ef 100644 --- a/python/paddle/fluid/tests/unittests/test_size_op.py +++ b/python/paddle/fluid/tests/unittests/test_size_op.py @@ -69,8 +69,8 @@ class TestSizeAPI(unittest.TestCase): with fluid.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] - x_1 = paddle.fluid.data(shape=shape1, dtype='int32', name='x_1') - x_2 = paddle.fluid.data(shape=shape2, dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=shape2, dtype='int32', name='x_2') input_1 = np.random.random(shape1).astype("int32") input_2 = np.random.random(shape2).astype("int32") out_1 = paddle.numel(x_1) diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index b8b0ccb8417..59725ecf040 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -720,9 +720,15 @@ class TestSliceApiWithLoDTensorArray(unittest.TestCase): def set_program_and_run(self, main_program, case_num): with fluid.program_guard(main_program): x = [ - fluid.data(name='x0', shape=self.shape, dtype="float32"), - fluid.data(name='x1', shape=self.shape, dtype="float32"), - fluid.data(name='x2', shape=self.shape, dtype="float32"), + paddle.static.data( + name='x0', shape=self.shape, dtype="float32" + ), + paddle.static.data( + name='x1', shape=self.shape, dtype="float32" + ), + paddle.static.data( + name='x2', shape=self.shape, dtype="float32" + ), ] for each_x in x: diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py index 6f1565c0939..821733c3a71 100644 --- a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py +++ 
b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py @@ -54,8 +54,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() ret = smooth_l1_loss(input, label) @@ -93,8 +97,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') ret = smooth_l1_loss(input, label) @@ -132,8 +140,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') ret = smooth_l1_loss(input, label) @@ -172,8 +184,12 @@ class SmoothL1Loss(unittest.TestCase): else fluid.CPUPlace() ) with fluid.program_guard(prog, startup_prog): - input = fluid.data(name='input', shape=[100, 200], dtype='float32') - label = fluid.data(name='label', shape=[100, 200], dtype='float32') + input = paddle.static.data( + name='input', shape=[100, 200], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[100, 200], dtype='float32' + ) smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) ret = smooth_l1_loss(input, label) diff --git a/python/paddle/fluid/tests/unittests/test_softmax2d.py b/python/paddle/fluid/tests/unittests/test_softmax2d.py index 61d4bb93106..cb24181cf3d 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax2d.py +++ b/python/paddle/fluid/tests/unittests/test_softmax2d.py @@ -35,7 +35,7 @@ class TestSoftmax2DAPI(unittest.TestCase): def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) m = paddle.nn.Softmax2D() out = m(x) exe = paddle.static.Executor(self.place) @@ -111,7 +111,7 @@ class TestSoftmax2DError(unittest.TestCase): def test_static_error(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5, 5], 'float32') + x = paddle.static.data('X', [5, 5], 'float32') m = paddle.nn.Softmax2D() self.assertRaises(AssertionError, m, x) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py index f56e1585605..90975850466 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_op.py @@ -89,8 +89,10 @@ class 
TestSoftmaxMaskFuseOp0(OpTest): class TestDropoutBiasFuseOp3(unittest.TestCase): def test_static_result(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data(name="x", shape=[1, 1, 8, 32], dtype="float32") - input_mask = fluid.data( + input_x = paddle.static.data( + name="x", shape=[1, 1, 8, 32], dtype="float32" + ) + input_mask = paddle.static.data( name="mask", shape=[1, 1, 8, 32], dtype="float32" ) rst = incubate.softmax_mask_fuse(input_x, input_mask) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py index 8d6d866fe91..ddc894314a8 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py @@ -92,7 +92,7 @@ class TestDropoutBiasFuseOp2(unittest.TestCase): def test_static(self): for dtype in self.dtypes: with fluid.program_guard(fluid.Program(), fluid.Program()): - input_x = fluid.data( + input_x = paddle.static.data( name="x", shape=[1, 4, 32, 32], dtype=dtype ) rst = incubate.softmax_mask_fuse_upper_triangle(input_x) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 943e9ce0713..860541f3710 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -462,7 +462,7 @@ class TestSoftmaxAPI(unittest.TestCase): def test_static_check(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, 'float32') + x = paddle.static.data('X', self.x_np.shape, 'float32') out1 = self.softmax(x) m = paddle.nn.Softmax() out2 = m(x) @@ -508,12 +508,12 @@ class TestSoftmaxAPI(unittest.TestCase): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) # The input dtype must be float16, float32, float64. - x_int32 = paddle.fluid.data( + x_int32 = paddle.static.data( name='x_int32', shape=[2, 3], dtype='int32' ) self.assertRaises(TypeError, self.softmax, x_int32) # support the input dtype is float16 - x_fp16 = paddle.fluid.data( + x_fp16 = paddle.static.data( name='x_fp16', shape=[2, 3], dtype='float16' ) self.softmax(x_fp16) diff --git a/python/paddle/fluid/tests/unittests/test_solve_op.py b/python/paddle/fluid/tests/unittests/test_solve_op.py index d1598954a43..9a0fb7a7235 100644 --- a/python/paddle/fluid/tests/unittests/test_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_solve_op.py @@ -270,30 +270,30 @@ class TestSolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.solve, x1, y1) # The data type of input must be float32 or float64. 
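A runnable sketch of paddle.linalg.solve under the migrated calls, mirroring the shapes TestSolveOpAPI_1 uses below (a [3, 3] system with a [3] right-hand side); float64 inputs and a CPU place are assumptions of this sketch, not requirements of the API:

import numpy as np

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='input_x', shape=[3, 3], dtype='float64')
    y = paddle.static.data(name='input_y', shape=[3], dtype='float64')
    out = paddle.linalg.solve(x, y)
    exe = paddle.static.Executor(paddle.CPUPlace())
    (res,) = exe.run(
        feed={
            'input_x': np.random.rand(3, 3).astype('float64'),
            'input_y': np.random.rand(3).astype('float64'),
        },
        fetch_list=[out],
    )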
- x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="int64") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="int64") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="int64") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="int64") self.assertRaises(TypeError, paddle.linalg.solve, x4, y4) - x5 = fluid.data(name="x5", shape=[30, 30], dtype="float16") - y5 = fluid.data(name="y5", shape=[30, 10], dtype="float16") + x5 = paddle.static.data(name="x5", shape=[30, 30], dtype="float16") + y5 = paddle.static.data(name="y5", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.solve, x5, y5) # The number of dimensions of input'X must be >= 2. - x6 = fluid.data(name="x6", shape=[30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises(ValueError, paddle.linalg.solve, x6, y6) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises(ValueError, paddle.linalg.solve, x7, y7) @@ -308,10 +308,10 @@ class TestSolveOpAPI_1(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[3, 3], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[3], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -369,10 +369,10 @@ class TestSolveOpAPI_2(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[10, 4], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -429,10 +429,10 @@ class TestSolveOpAPI_3(unittest.TestCase): def check_static_result(self, place): paddle.enable_static() with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[10, 4], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -489,10 +489,10 @@ class TestSolveOpAPI_4(unittest.TestCase): def 
check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - paddle_input_x = fluid.data( + paddle_input_x = paddle.static.data( name="input_x", shape=[2, 3, 3], dtype=self.dtype ) - paddle_input_y = fluid.data( + paddle_input_y = paddle.static.data( name="input_y", shape=[1, 3, 3], dtype=self.dtype ) paddle_result = paddle.linalg.solve(paddle_input_x, paddle_input_y) @@ -548,8 +548,8 @@ class TestSolveOpSingularAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[4, 4], dtype=self.dtype) - y = fluid.data(name="y", shape=[4, 4], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[4, 4], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[4, 4], dtype=self.dtype) result = paddle.linalg.solve(x, y) diff --git a/python/paddle/fluid/tests/unittests/test_sort_op.py b/python/paddle/fluid/tests/unittests/test_sort_op.py index 3f8666c8e7c..a933187d273 100644 --- a/python/paddle/fluid/tests/unittests/test_sort_op.py +++ b/python/paddle/fluid/tests/unittests/test_sort_op.py @@ -27,7 +27,9 @@ class TestSortOnCPU(unittest.TestCase): def test_api_0(self): with fluid.program_guard(fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 4], dtype="float32" + ) output = paddle.sort(x=input) exe = fluid.Executor(self.place) data = np.array( @@ -43,7 +45,9 @@ class TestSortOnCPU(unittest.TestCase): def test_api_1(self): with fluid.program_guard(fluid.Program()): - input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32") + input = paddle.static.data( + name="input", shape=[2, 3, 4], dtype="float32" + ) output = paddle.sort(x=input, axis=1) exe = fluid.Executor(self.place) data = np.array( diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 26f5dcc2943..1d0b048b617 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -287,8 +287,8 @@ class TestSplitAPI(unittest.TestCase): positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1) positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) - x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1') - x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2') + x_1 = paddle.static.data(shape=[4, 5, 6], dtype='int32', name='x_1') + x_2 = paddle.static.data(shape=[4, 5, None], dtype='int32', name='x_2') out_0, out_1, out_2 = paddle.split( x=x_1, diff --git a/python/paddle/fluid/tests/unittests/test_splits_api.py b/python/paddle/fluid/tests/unittests/test_splits_api.py index 40083388d63..3ecb0ca9278 100644 --- a/python/paddle/fluid/tests/unittests/test_splits_api.py +++ b/python/paddle/fluid/tests/unittests/test_splits_api.py @@ -56,7 +56,7 @@ class TestSplitsAPI(unittest.TestCase): paddle.enable_static() for func, func_type in test_list: with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype) + x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = func(x, self.num_or_sections) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) @@ -170,7 +170,7 @@ class TestSplitsError(unittest.TestCase): paddle.enable_static() for func, _ in test_list: with 
paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [5], 'float32') + x = paddle.static.data('X', [5], 'float32') self.assertRaises(ValueError, func, x, self.num_or_sections) def test_dygraph_error(self): diff --git a/python/paddle/fluid/tests/unittests/test_square_error_cost.py b/python/paddle/fluid/tests/unittests/test_square_error_cost.py index afd16a30957..db015ae3135 100644 --- a/python/paddle/fluid/tests/unittests/test_square_error_cost.py +++ b/python/paddle/fluid/tests/unittests/test_square_error_cost.py @@ -55,13 +55,17 @@ class TestSquareErrorInvalidInput(unittest.TestCase): def test_error(self): def test_invalid_input(): input = [256, 3] - label = fluid.data(name='label1', shape=[None, 3], dtype='float32') + label = paddle.static.data( + name='label1', shape=[None, 3], dtype='float32' + ) loss = paddle.nn.functional.square_error_cost(input, label) self.assertRaises(TypeError, test_invalid_input) def test_invalid_label(): - input = fluid.data(name='input2', shape=[None, 3], dtype='float32') + input = paddle.static.data( + name='input2', shape=[None, 3], dtype='float32' + ) label = [256, 3] loss = paddle.nn.functional.square_error_cost(input, label) diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load.py b/python/paddle/fluid/tests/unittests/test_static_save_load.py index 03695262e75..07a3e114c49 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load.py @@ -881,7 +881,7 @@ class TestVariableInit(unittest.TestCase): def test_variable_init(self): - x = fluid.data(name="x", shape=[10, 10], dtype='float32') + x = paddle.static.data(name="x", shape=[10, 10], dtype='float32') y = paddle.static.nn.fc(x, 10) z = paddle.static.nn.fc(y, 10) diff --git a/python/paddle/fluid/tests/unittests/test_std_layer.py b/python/paddle/fluid/tests/unittests/test_std_layer.py index 8f8899f47f7..7cbf235699b 100644 --- a/python/paddle/fluid/tests/unittests/test_std_layer.py +++ b/python/paddle/fluid/tests/unittests/test_std_layer.py @@ -48,7 +48,7 @@ class TestStdAPI(unittest.TestCase): def static(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape, self.dtype) + x = paddle.static.data('X', self.shape, self.dtype) out = paddle.std(x, self.axis, self.unbiased, self.keepdim) exe = paddle.static.Executor(self.place) res = exe.run(feed={'X': self.x}, fetch_list=[out]) @@ -115,7 +115,7 @@ class TestStdAPI_alias(unittest.TestCase): class TestStdError(unittest.TestCase): def test_error(self): with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', [2, 3, 4], 'int32') + x = paddle.static.data('X', [2, 3, 4], 'int32') self.assertRaises(TypeError, paddle.std, x) diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index a0c0d7757e5..c485449b0c2 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -437,14 +437,14 @@ class TestRaiseSumError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = 
fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) @@ -458,30 +458,38 @@ class TestRaiseSumsError(unittest.TestCase): self.assertRaises(TypeError, test_type) def test_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") - data2 = fluid.data(name="input2", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") + data2 = paddle.static.data(name="input2", shape=[10], dtype="int8") paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_dtype) def test_dtype1(): - data1 = fluid.data(name="input1", shape=[10], dtype="int8") + data1 = paddle.static.data(name="input1", shape=[10], dtype="int8") paddle.add_n(data1) self.assertRaises(TypeError, test_dtype1) def test_out_type(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) out = [10] out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_type) def test_out_dtype(): - data1 = fluid.data(name="input1", shape=[10], dtype="flaot32") - data2 = fluid.data(name="input2", shape=[10], dtype="float32") - out = fluid.data(name="out", shape=[10], dtype="int8") + data1 = paddle.static.data( + name="input1", shape=[10], dtype="flaot32" + ) + data2 = paddle.static.data( + name="input2", shape=[10], dtype="float32" + ) + out = paddle.static.data(name="out", shape=[10], dtype="int8") out = paddle.add_n([data1, data2]) self.assertRaises(TypeError, test_out_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_svd_op.py b/python/paddle/fluid/tests/unittests/test_svd_op.py index a760fef4ff2..f17c80fae6e 100644 --- a/python/paddle/fluid/tests/unittests/test_svd_op.py +++ b/python/paddle/fluid/tests/unittests/test_svd_op.py @@ -307,7 +307,7 @@ class TestSvdAPI(unittest.TestCase): for place in places: with fluid.program_guard(fluid.Program(), fluid.Program()): a = np.random.rand(5, 5) - x = paddle.fluid.data( + x = paddle.static.data( name="input", shape=[5, 5], dtype='float64' ) u, s, vh = paddle.linalg.svd(x) diff --git a/python/paddle/fluid/tests/unittests/test_switch_case.py b/python/paddle/fluid/tests/unittests/test_switch_case.py index 5a1e8fb451b..de8763b27c3 100644 --- a/python/paddle/fluid/tests/unittests/test_switch_case.py +++ b/python/paddle/fluid/tests/unittests/test_switch_case.py @@ -441,7 +441,9 @@ class TestAPISwitchCase_Nested(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - index_1 = fluid.data(name="index_1", shape=[1], dtype='uint8') + index_1 = paddle.static.data( + name="index_1", shape=[1], dtype='uint8' + ) index_2 = paddle.tensor.fill_constant( shape=[1], dtype='int32', value=2 ) @@ -540,7 +542,9 @@ class TestAPISwitchCase_Nested(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - index_1 = fluid.data(name="index_1", shape=[1], dtype='uint8') + index_1 = paddle.static.data( + name="index_1", shape=[1], dtype='uint8' + ) index_2 = paddle.full(shape=[], dtype='int32', fill_value=2) index_3 = paddle.full(shape=[], dtype='int64', fill_value=3) diff --git a/python/paddle/fluid/tests/unittests/test_take.py 
b/python/paddle/fluid/tests/unittests/test_take.py index bf16efa87ab..6a4b9702c5c 100644 --- a/python/paddle/fluid/tests/unittests/test_take.py +++ b/python/paddle/fluid/tests/unittests/test_take.py @@ -55,10 +55,10 @@ class TestTakeAPI(unittest.TestCase): startup_program = Program() train_program = Program() with program_guard(startup_program, train_program): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype=self.index_dtype, shape=self.index_shape ) out = paddle.take(x, index, mode=self.mode) @@ -116,7 +116,7 @@ class TestTakeTypeError(TestTakeAPI): """Argument 'index' must be Tensor""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) self.assertRaises( @@ -132,10 +132,10 @@ class TestTakeTypeError(TestTakeAPI): """Data type of argument 'index' must be in [paddle.int32, paddle.int64]""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype='float64', shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype='float32', shape=self.index_shape ) self.assertRaises(TypeError, paddle.take, x, index, self.mode) @@ -184,10 +184,10 @@ class TestTakeModeRaisePos(unittest.TestCase): an error is reported directly through `paddle.index_select`""" paddle.enable_static() with program_guard(Program()): - x = fluid.data( + x = paddle.static.data( name='input', dtype=self.input_dtype, shape=self.input_shape ) - index = fluid.data( + index = paddle.static.data( name='index', dtype=self.index_dtype, shape=self.index_shape ) self.assertRaises(ValueError, paddle.index_select, x, index) diff --git a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py index 7abd86d19f6..b5a9c2169ff 100644 --- a/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py +++ b/python/paddle/fluid/tests/unittests/test_take_along_axis_op.py @@ -83,8 +83,8 @@ class TestTakeAlongAxisAPI(unittest.TestCase): def test_api_static(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program()): - x = paddle.fluid.data('X', self.shape) - index = paddle.fluid.data('Index', self.index_shape, "int64") + x = paddle.static.data('X', self.shape) + index = paddle.static.data('Index', self.index_shape, "int64") out = paddle.take_along_axis(x, index, self.axis) exe = paddle.static.Executor(self.place[0]) res = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_trace_op.py b/python/paddle/fluid/tests/unittests/test_trace_op.py index b86422de074..a2369448bc6 100644 --- a/python/paddle/fluid/tests/unittests/test_trace_op.py +++ b/python/paddle/fluid/tests/unittests/test_trace_op.py @@ -72,7 +72,9 @@ class TestTraceOpCase2(TestTraceOp): class TestTraceAPICase(unittest.TestCase): def test_case1(self): case = np.random.randn(2, 20, 2, 3).astype('float32') - data1 = fluid.data(name='data1', shape=[2, 20, 2, 3], dtype='float32') + data1 = paddle.static.data( + name='data1', shape=[2, 20, 2, 3], dtype='float32' + ) out1 = tensor.trace(data1) out2 = tensor.trace(data1, offset=-5, axis1=1, axis2=-1) diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index 54b98551e2b..e0ed1f2f201 100644 --- 
a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -456,7 +456,7 @@ class TestTransposeApi(unittest.TestCase): class TestTAPI(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10], dtype="float64", name="data") + data = paddle.static.data(shape=[10], dtype="float64", name="data") data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -466,7 +466,9 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[10, 5], dtype="float64", name="data") + data = paddle.static.data( + shape=[10, 5], dtype="float64", name="data" + ) data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -476,7 +478,9 @@ class TestTAPI(unittest.TestCase): self.assertEqual((result == expected_result).all(), True) with fluid.program_guard(fluid.Program()): - data = fluid.data(shape=[1, 5], dtype="float64", name="data") + data = paddle.static.data( + shape=[1, 5], dtype="float64", name="data" + ) data_t = paddle.t(data) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -511,7 +515,7 @@ class TestTAPI(unittest.TestCase): def test_errors(self): with fluid.program_guard(fluid.Program()): - x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64') + x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float64') def test_x_dimension_check(): paddle.t(x) diff --git a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py index 802cf4f9a62..2bd6853f73f 100644 --- a/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py +++ b/python/paddle/fluid/tests/unittests/test_triangular_solve_op.py @@ -258,8 +258,8 @@ class TestTriangularSolveAPI(unittest.TestCase): def check_static_result(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - x = fluid.data(name="x", shape=[3, 3], dtype=self.dtype) - y = fluid.data(name="y", shape=[3, 2], dtype=self.dtype) + x = paddle.static.data(name="x", shape=[3, 3], dtype=self.dtype) + y = paddle.static.data(name="y", shape=[3, 2], dtype=self.dtype) z = paddle.linalg.triangular_solve(x, y) x_np = np.random.random([3, 3]).astype(self.dtype) @@ -310,35 +310,35 @@ class TestTriangularSolveOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1) # The data type of input must be float32 or float64. 
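Likewise for paddle.linalg.triangular_solve, a build-only sketch mirroring check_static_result above (a [3, 3] triangular system, [3, 2] right-hand side). The ValueError hunks that follow encode the shape rules: both inputs must be at least 2-D, and X must be square in its last two dimensions:

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[3, 3], dtype='float64')
    y = paddle.static.data(name='y', shape=[3, 2], dtype='float64')
    # X is interpreted as a triangular matrix; only graph construction shown.
    z = paddle.linalg.triangular_solve(x, y)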
- x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool") - y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool") + x2 = paddle.static.data(name="x2", shape=[30, 30], dtype="bool") + y2 = paddle.static.data(name="y2", shape=[30, 10], dtype="bool") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x2, y2) - x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32") - y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32") + x3 = paddle.static.data(name="x3", shape=[30, 30], dtype="int32") + y3 = paddle.static.data(name="y3", shape=[30, 10], dtype="int32") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x3, y3) - x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16") - y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16") + x4 = paddle.static.data(name="x4", shape=[30, 30], dtype="float16") + y4 = paddle.static.data(name="y4", shape=[30, 10], dtype="float16") self.assertRaises(TypeError, paddle.linalg.triangular_solve, x4, y4) # The number of dimensions of input'X must be >= 2. - x5 = fluid.data(name="x5", shape=[30], dtype="float64") - y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64") + x5 = paddle.static.data(name="x5", shape=[30], dtype="float64") + y5 = paddle.static.data(name="y5", shape=[30, 30], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x5, y5 ) # The number of dimensions of input'Y must be >= 2. - x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64") - y6 = fluid.data(name="y6", shape=[30], dtype="float64") + x6 = paddle.static.data(name="x6", shape=[30, 30], dtype="float64") + y6 = paddle.static.data(name="y6", shape=[30], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x6, y6 ) # The inner-most 2 dimensions of input'X should be equal to each other - x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64") - y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64") + x7 = paddle.static.data(name="x7", shape=[2, 3, 4], dtype="float64") + y7 = paddle.static.data(name="y7", shape=[2, 4, 3], dtype="float64") self.assertRaises( ValueError, paddle.linalg.triangular_solve, x7, y7 ) diff --git a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py index f35fc67bb2d..ee62930019d 100644 --- a/python/paddle/fluid/tests/unittests/test_tril_triu_op.py +++ b/python/paddle/fluid/tests/unittests/test_tril_triu_op.py @@ -78,7 +78,9 @@ def case_generator(op_type, Xshape, diagonal, expected): def test_failure(self): paddle.enable_static() - data = fluid.data(shape=Xshape, dtype='float64', name=cls_name) + data = paddle.static.data( + shape=Xshape, dtype='float64', name=cls_name + ) with self.assertRaisesRegex( eval(expected.split(':')[-1]), errmsg[expected] ): @@ -143,7 +145,9 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data( + shape=[1, 9, -1, 4], dtype=dtype, name='x' + ) tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = ( @@ -184,7 +188,9 @@ class TestTrilTriuOpAPI(unittest.TestCase): startup_prog = Program() with program_guard(prog, startup_prog): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.data(shape=[1, 9, -1, 4], dtype=dtype, name='x') + x = paddle.static.data( + shape=[1, 9, -1, 4], dtype=dtype, name='x' + ) triu_out = paddle.triu(x) place = ( diff --git 
a/python/paddle/fluid/tests/unittests/test_trunc_op.py b/python/paddle/fluid/tests/unittests/test_trunc_op.py
index db45b36b563..8d8f0ce2b3b 100644
--- a/python/paddle/fluid/tests/unittests/test_trunc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_trunc_op.py
@@ -68,7 +68,7 @@ class TestTruncAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape)
+            x = paddle.static.data('X', self.shape)
             out = paddle.trunc(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x}, fetch_list=[out])
@@ -86,7 +86,7 @@ class TestTruncAPI(unittest.TestCase):

     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [20, 20], 'bool')
+            x = paddle.static.data('X', [20, 20], 'bool')
             self.assertRaises(TypeError, paddle.trunc, x)
diff --git a/python/paddle/fluid/tests/unittests/test_unbind_op.py b/python/paddle/fluid/tests/unittests/test_unbind_op.py
index cf1beb5bc87..9df2e1958d4 100644
--- a/python/paddle/fluid/tests/unittests/test_unbind_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unbind_op.py
@@ -27,10 +27,10 @@ class TestUnbind(unittest.TestCase):
     def test_unbind(self):
         paddle.enable_static()
-        x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1')
+        x_1 = paddle.static.data(shape=[2, 3], dtype='float32', name='x_1')
         [out_0, out_1] = tensor.unbind(input=x_1, axis=0)
         input_1 = np.random.random([2, 3]).astype("float32")
-        axis = fluid.data(shape=[1], dtype='int32', name='axis')
+        axis = paddle.static.data(shape=[1], dtype='int32', name='axis')
         exe = fluid.Executor(place=fluid.CPUPlace())

         [res_1, res_2] = exe.run(
@@ -85,10 +85,10 @@ class TestLayersUnbind(unittest.TestCase):
     def test_layers_unbind(self):
         paddle.enable_static()
-        x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1')
+        x_1 = paddle.static.data(shape=[2, 3], dtype='float32', name='x_1')
         [out_0, out_1] = paddle.unbind(input=x_1, axis=0)
         input_1 = np.random.random([2, 3]).astype("float32")
-        axis = fluid.data(shape=[1], dtype='int32', name='axis')
+        axis = paddle.static.data(shape=[1], dtype='int32', name='axis')
         exe = fluid.Executor(place=fluid.CPUPlace())

         [res_1, res_2] = exe.run(
@@ -235,7 +235,7 @@ class TestUnbindBF16Op(OpTest):
 class TestUnbindAxisError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
-            x = fluid.data(shape=[2, 3], dtype='float32', name='x')
+            x = paddle.static.data(shape=[2, 3], dtype='float32', name='x')

             def test_table_Variable():
                 tensor.unbind(input=x, axis=2.0)
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py
index 0dcdf0cc250..032f22ae835 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_bf16_op.py
@@ -243,7 +243,9 @@ class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            input = fluid.data(name="input", shape=[1, 3], dtype='uint16')
+            input = paddle.static.data(
+                name="input", shape=[1, 3], dtype='uint16'
+            )
             out_1 = random.uniform_random_batch_size_like(
                 input, [2, 4], dtype=np.uint16
             )  # out_1.shape=[1, 4]
diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
index 3a03b7c0dce..77509851fab 100644
--- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
+++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py
@@ -360,7 +360,9 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
         startup_program = fluid.Program()
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            shape = fluid.data(name='shape_tensor', shape=[2], dtype="int32")
+            shape = paddle.static.data(
+                name='shape_tensor', shape=[2], dtype="int32"
+            )
             ret = paddle.uniform(shape)

             place = fluid.CPUPlace()
diff --git a/python/paddle/fluid/tests/unittests/test_unique.py b/python/paddle/fluid/tests/unittests/test_unique.py
index b3ae10a6c33..5060e2fcf5f 100644
--- a/python/paddle/fluid/tests/unittests/test_unique.py
+++ b/python/paddle/fluid/tests/unittests/test_unique.py
@@ -18,7 +18,6 @@ import numpy as np
 from op_test import OpTest

 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.core as core
@@ -82,7 +81,7 @@ class TestUniqueRaiseError(unittest.TestCase):
         self.assertRaises(TypeError, test_type)

         def test_dtype():
-            data = fluid.data(shape=[10], dtype="float16", name="input")
+            data = paddle.static.data(shape=[10], dtype="float16", name="input")
             paddle.unique(data)

         self.assertRaises(TypeError, test_dtype)
@@ -295,7 +294,7 @@ class TestUniqueAPI(unittest.TestCase):
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
-            x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64')
+            x = paddle.static.data(name='x', shape=[3, 2], dtype='float64')
             unique, inverse, counts = paddle.unique(
                 x, return_inverse=True, return_counts=True, axis=0
             )
@@ -320,14 +319,16 @@ class TestUniqueError(unittest.TestCase):
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
-            x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
+            x = paddle.static.data(
+                name='x', shape=[10, 10], dtype='float16'
+            )
             result = paddle.unique(x)

         self.assertRaises(TypeError, test_x_dtype)

     def test_attr(self):
         paddle.enable_static()
-        x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')
+        x = paddle.static.data(name='x', shape=[10, 10], dtype='float64')

         def test_return_index():
             result = paddle.unique(x, return_index=0)
diff --git a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py
index 1cf7714844c..970cddbe36b 100644
--- a/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unique_consecutive_op.py
@@ -206,7 +206,7 @@ class TestUniqueConsecutiveAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             paddle.enable_static()
-            input_x = fluid.data(
+            input_x = paddle.static.data(
                 name="input_x",
                 shape=[
                     100,
@@ -243,7 +243,7 @@ class TestUniqueConsecutiveCase2API(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             paddle.enable_static()
-            input_x = fluid.data(
+            input_x = paddle.static.data(
                 name="input_x",
                 shape=[
                     100,
@@ -284,7 +284,7 @@ class TestUniqueConsecutiveCase3API(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             paddle.enable_static()
-            input_x = fluid.data(
+            input_x = paddle.static.data(
                 name="input_x",
                 shape=[
                     100,
diff --git a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
index ccbf56c6647..a4f553ddc43 100644
--- a/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
+++ b/python/paddle/fluid/tests/unittests/test_unique_with_counts.py
@@ -18,7 +18,6 @@ import numpy as np
 from op_test import OpTest

 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.core as core
@@ -88,7 +87,7 @@ class TestUniqueWithCountsRaiseError(unittest.TestCase):
         self.assertRaises(TypeError, test_type)

         def test_dtype():
-            data = fluid.data(shape=[10], dtype="float16", name="input")
+            data = paddle.static.data(shape=[10], dtype="float16", name="input")
             paddle.unique(data)

         self.assertRaises(TypeError, test_dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py
index 72a31461859..2adf212f9ff 100644
--- a/python/paddle/fluid/tests/unittests/test_unpool1d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unpool1d_op.py
@@ -148,7 +148,7 @@ class TestUnpool1DOpAPI_static(unittest.TestCase):
                 input_data = np.array(
                     [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]]
                 ).astype("float32")
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name='x', shape=[1, 3, 4], dtype='float32'
                 )
                 output, indices = F.max_pool1d(
diff --git a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py
index aa4da0b7c10..ea2c3cc3d2f 100644
--- a/python/paddle/fluid/tests/unittests/test_unpool3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unpool3d_op.py
@@ -403,7 +403,7 @@ class TestUnpool3DOpAPI_static(unittest.TestCase):
                         ]
                     ]
                 ).astype("float32")
-                x = paddle.fluid.data(
+                x = paddle.static.data(
                     name='x', shape=[1, 1, 2, 4, 4], dtype='float32'
                 )
                 output, indices = F.max_pool3d(
diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py
index e5eefc067e8..16eb8ffb3a0 100644
--- a/python/paddle/fluid/tests/unittests/test_unpool_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py
@@ -411,7 +411,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase):
             [[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]]
         ).astype("float32")

-        x = fluid.data(name="x", shape=[1, 1, 4, 4], dtype="float32")
+        x = paddle.static.data(name="x", shape=[1, 1, 4, 4], dtype="float32")
         output, indices = F.max_pool2d(
             x, kernel_size=2, stride=2, return_mask=True
         )
diff --git a/python/paddle/fluid/tests/unittests/test_unzip_op.py b/python/paddle/fluid/tests/unittests/test_unzip_op.py
index 71caac8c0f2..0dbe8559711 100644
--- a/python/paddle/fluid/tests/unittests/test_unzip_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unzip_op.py
@@ -29,8 +29,8 @@ class TestUnzipOp(unittest.TestCase):
         paddle.enable_static()
         if core.is_compiled_with_cuda():
             place = fluid.CUDAPlace(0)
-            x = fluid.data(name='X', shape=[3, 4], dtype='float64')
-            lod = fluid.data(name='lod', shape=[11], dtype='int64')
+            x = paddle.static.data(name='X', shape=[3, 4], dtype='float64')
+            lod = paddle.static.data(name='lod', shape=[11], dtype='int64')
             output = paddle.incubate.operators.unzip(x, lod)

             input = [
diff --git a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
index 9cef4d72167..8b15157c846 100644
--- a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
+++ b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from op_test import OpTest

+import paddle
 import paddle.fluid as fluid
 import paddle.static.amp.amp_nn as amp_nn
@@ -136,17 +137,19 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):

 class TestUpdateLossScalingLayer(unittest.TestCase):
     def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()):
-        a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
-        b = fluid.data(name="b", shape=[512, 128], dtype='float32')
+        a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
+        b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
         x = [a, b]
-        found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
-        prev_loss_scaling = fluid.data(
+        found_inf = paddle.static.data(
+            name="found_inf", shape=[1], dtype='bool'
+        )
+        prev_loss_scaling = paddle.static.data(
             name="prev_loss_scaling", shape=[1], dtype='float32'
         )
-        num_good_steps = fluid.data(
+        num_good_steps = paddle.static.data(
             name="num_good_steps", shape=[1], dtype='int32'
         )
-        num_bad_steps = fluid.data(
+        num_bad_steps = paddle.static.data(
             name="num_bad_steps", shape=[1], dtype='int32'
         )
@@ -207,17 +210,19 @@ class TestUpdateLossScalingLayer(unittest.TestCase):
         assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))

     def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
-        a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
-        b = fluid.data(name="b", shape=[512, 128], dtype='float32')
+        a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
+        b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
         x = [a, b]
-        found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
-        prev_loss_scaling = fluid.data(
+        found_inf = paddle.static.data(
+            name="found_inf", shape=[1], dtype='bool'
+        )
+        prev_loss_scaling = paddle.static.data(
             name="prev_loss_scaling", shape=[1], dtype='float32'
         )
-        num_good_steps = fluid.data(
+        num_good_steps = paddle.static.data(
             name="num_good_steps", shape=[1], dtype='int32'
         )
-        num_bad_steps = fluid.data(
+        num_bad_steps = paddle.static.data(
             name="num_bad_steps", shape=[1], dtype='int32'
         )
diff --git a/python/paddle/fluid/tests/unittests/test_variance_layer.py b/python/paddle/fluid/tests/unittests/test_variance_layer.py
index 6d9338542dd..2613fb91b15 100644
--- a/python/paddle/fluid/tests/unittests/test_variance_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_variance_layer.py
@@ -48,7 +48,7 @@ class TestVarAPI(unittest.TestCase):
     def static(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape, self.dtype)
+            x = paddle.static.data('X', self.shape, self.dtype)
             out = paddle.var(x, self.axis, self.unbiased, self.keepdim)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x}, fetch_list=[out])
@@ -115,7 +115,7 @@ class TestVarAPI_alias(unittest.TestCase):
 class TestVarError(unittest.TestCase):
     def test_error(self):
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [2, 3, 4], 'int32')
+            x = paddle.static.data('X', [2, 3, 4], 'int32')
             self.assertRaises(TypeError, paddle.var, x)
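Every hunk above applies the same mechanical rewrite: `fluid.data`, which this patch removes, becomes `paddle.static.data`, with the arguments passed through unchanged and only re-wrapped for line length. A minimal sketch of the pattern these tests exercise (the names, shapes, and CPU place are illustrative, not taken from any single test):

    import numpy as np
    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        # paddle.static.data declares a feedable placeholder in the
        # static graph, just as the removed fluid.data did.
        x = paddle.static.data(name='x', shape=[3, 4], dtype='float32')
        out = paddle.trunc(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(feed={'x': np.random.rand(3, 4).astype('float32')},
                      fetch_list=[out])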
diff --git a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py
index f6801d4f089..6bb21611f83 100644
--- a/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py
+++ b/python/paddle/fluid/tests/unittests/test_viterbi_decode_op.py
@@ -124,13 +124,15 @@ class TestViterbiAPI(unittest.TestCase):
     def check_static_result(self, place):
         bz, length, ntags = self.bz, self.len, self.ntags
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            Input = fluid.data(
+            Input = paddle.static.data(
                 name="Input", shape=[bz, length, ntags], dtype="float32"
             )
-            Transition = fluid.data(
+            Transition = paddle.static.data(
                 name="Transition", shape=[ntags, ntags], dtype="float32"
             )
-            Length = fluid.data(name="Length", shape=[bz], dtype="int64")
+            Length = paddle.static.data(
+                name="Length", shape=[bz], dtype="int64"
+            )
             decoder = paddle.text.ViterbiDecoder(Transition, self.use_tag)
             score, path = decoder(Input, Length)
             exe = fluid.Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_warprnnt_op.py b/python/paddle/fluid/tests/unittests/test_warprnnt_op.py
index 381ed400735..567ceb72f11 100644
--- a/python/paddle/fluid/tests/unittests/test_warprnnt_op.py
+++ b/python/paddle/fluid/tests/unittests/test_warprnnt_op.py
@@ -18,7 +18,6 @@ import numpy as np
 from op_test import OpTest

 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle import _C_ops
 from paddle.fluid import Program, program_guard
@@ -277,17 +276,21 @@ class TestWarpRNNTOpError(unittest.TestCase):
     def test_errors(self):
         print("test_errors")
         with program_guard(Program(), Program()):
-            logits = fluid.data(name='input', shape=[5, 16, 6], dtype='float32')
-            logits_length = fluid.data(
+            logits = paddle.static.data(
+                name='input', shape=[5, 16, 6], dtype='float32'
+            )
+            logits_length = paddle.static.data(
                 name='logit_lengths', shape=[None], dtype='int32'
             )
-            label = fluid.data(name='labels', shape=[16, 3], dtype='int32')
-            label_length = fluid.data(
+            label = paddle.static.data(
+                name='labels', shape=[16, 3], dtype='int32'
+            )
+            label_length = paddle.static.data(
                 name='label_lengths', shape=[None], dtype='int32'
             )

             def test_logits_Variable():
-                logits_data = fluid.data(
+                logits_data = paddle.static.data(
                     name='logits_data', shape=[5, 16, 6], dtype='int32'
                 )
                 paddle.nn.functional.rnnt_loss(
@@ -300,7 +303,7 @@ class TestWarpRNNTOpError(unittest.TestCase):
             self.assertRaises(TypeError, test_logits_Variable)

             def test_label_Variable():
-                label_data = fluid.data(
+                label_data = paddle.static.data(
                     name='label_data', shape=[16, 3], dtype='int64'
                 )
                 paddle.nn.functional.rnnt_loss(
@@ -313,7 +316,7 @@ class TestWarpRNNTOpError(unittest.TestCase):
             self.assertRaises(TypeError, test_label_Variable)

             def test_logits_len_Variable():
-                logits_length_data = fluid.data(
+                logits_length_data = paddle.static.data(
                     name='logits_length_data', shape=[None], dtype='int64'
                 )
                 paddle.nn.functional.rnnt_loss(
@@ -326,7 +329,7 @@ class TestWarpRNNTOpError(unittest.TestCase):
             self.assertRaises(TypeError, test_logits_len_Variable)

             def test_label_len_Variable():
-                label_length_data = fluid.data(
+                label_length_data = paddle.static.data(
                     name='label_length_data', shape=[None], dtype='int64'
                 )
                 paddle.nn.functional.rnnt_loss(
diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py
index 714e541f581..cbb69319350 100644
--- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py
@@ -72,7 +72,7 @@ class TestApiWhileLoop(unittest.TestCase):
             ten = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=10
             )
-            mem = fluid.data(name='mem', shape=[10], dtype='float32')
+            mem = paddle.static.data(name='mem', shape=[10], dtype='float32')
             one = paddle.tensor.fill_constant(
                 shape=[10], dtype='float32', value=1
             )
@@ -205,8 +205,12 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
         with program_guard(main_program, startup_program):
             i = layers.zeros(shape=[1], dtype='int64')
             j = layers.zeros(shape=[1], dtype='int64')
-            init = fluid.data(name='init', shape=[3, 3], dtype='float32')
-            sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
+            init = paddle.static.data(
+                name='init', shape=[3, 3], dtype='float32'
+            )
+            sums = paddle.static.data(
+                name='sums', shape=[3, 3], dtype='float32'
+            )
             loop_len1 = paddle.tensor.fill_constant(
                 shape=[1], dtype='int64', value=2
             )
@@ -254,7 +258,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with fluid.program_guard(main_program, startup_program):
-            i = fluid.data(name='i', shape=[1], dtype='float32')
+            i = paddle.static.data(name='i', shape=[1], dtype='float32')
             i.stop_gradient = False
             eleven = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=11
@@ -262,7 +266,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
             one = paddle.tensor.fill_constant(
                 shape=[1], dtype='float32', value=1
             )
-            x = fluid.data(name='x', shape=[1], dtype='float32')
+            x = paddle.static.data(name='x', shape=[1], dtype='float32')
             x.stop_gradient = False

             out = paddle.static.nn.while_loop(cond, body, [i, x])
@@ -301,9 +305,9 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with fluid.program_guard(main_program, startup_program):
-            i = fluid.data(name='i', shape=[1], dtype='float32')
+            i = paddle.static.data(name='i', shape=[1], dtype='float32')
             i.stop_gradient = False
-            x = fluid.data(name='x', shape=[1], dtype='float32')
+            x = paddle.static.data(name='x', shape=[1], dtype='float32')
             x.stop_gradient = False

             out = paddle.static.nn.while_loop(cond, body, [i, x])
@@ -365,10 +369,10 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with fluid.program_guard(main_program, startup_program):
-            d0 = fluid.data(name='d0', shape=[10], dtype='float32')
-            d1 = fluid.data(name='d1', shape=[10], dtype='float32')
-            d2 = fluid.data(name='d2', shape=[10], dtype='float32')
-            x = fluid.data(name='x', shape=[10], dtype='float32')
+            d0 = paddle.static.data(name='d0', shape=[10], dtype='float32')
+            d1 = paddle.static.data(name='d1', shape=[10], dtype='float32')
+            d2 = paddle.static.data(name='d2', shape=[10], dtype='float32')
+            x = paddle.static.data(name='x', shape=[10], dtype='float32')
             x.stop_gradient = False
             i = layers.zeros(shape=[1], dtype='int64')
             i.stop_gradient = True
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
index 8ab2ae07e94..4a970d2acb3 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
@@ -26,7 +26,7 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_
 class TestZerosLikeAPIError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
-            x = paddle.fluid.data('x', [3, 4])
+            x = paddle.static.data('x', [3, 4])
             self.assertRaises(TypeError, zeros_like, x, 'int8')
@@ -36,7 +36,7 @@ class TestZerosLikeAPI(unittest.TestCase):
         startup_program = Program()
         train_program = Program()
         with program_guard(train_program, startup_program):
-            x = paddle.fluid.data('X', shape)
+            x = paddle.static.data('X', shape)
             out1 = zeros_like(x)
             out2 = zeros_like(x, np.bool_)
             out3 = zeros_like(x, 'float64')
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
index 04931e5fb53..e21153151d4 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_activation_op_xpu.py
@@ -126,7 +126,7 @@ class TestSiluAPI(unittest.TestCase):
     def test_static_api(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', [11, 17])
+            x = paddle.static.data('X', [11, 17])
             out1 = F.silu(x)
             m = paddle.nn.Silu()
             out2 = m(x)
@@ -152,12 +152,12 @@ class TestSiluAPI(unittest.TestCase):
             # The input type must be Variable.
             self.assertRaises(TypeError, F.silu, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = paddle.fluid.data(
+            x_int32 = paddle.static.data(
                 name='x_int32', shape=[11, 17], dtype='int32'
             )
             self.assertRaises(TypeError, F.silu, x_int32)
             # support the input dtype is float16
-            x_fp16 = paddle.fluid.data(
+            x_fp16 = paddle.static.data(
                 name='x_fp16', shape=[11, 17], dtype='float16'
             )
             F.silu(x_fp16)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py
index 57b57adce3b..0eb328a0fa1 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py
@@ -196,7 +196,7 @@ class XPUTestAdamwOp2(XPUOpTestWrapper):
             startup = fluid.Program()
             with fluid.program_guard(train_prog, startup):
                 with fluid.unique_name.guard():
-                    data = fluid.data(name="data", shape=shape)
+                    data = paddle.static.data(name="data", shape=shape)
                     conv = paddle.static.nn.conv2d(data, 8, 3)
                     loss = paddle.mean(conv)
@@ -453,8 +453,12 @@ class XPUTestAdamwOp2(XPUOpTestWrapper):
             startup = fluid.Program()
             with fluid.program_guard(train_prog, startup):
                 with fluid.unique_name.guard():
-                    x = fluid.data(name='x', shape=[None, 10], dtype='float32')
-                    y = fluid.data(name='y', shape=[None, 1], dtype='float32')
+                    x = paddle.static.data(
+                        name='x', shape=[None, 10], dtype='float32'
+                    )
+                    y = paddle.static.data(
+                        name='y', shape=[None, 1], dtype='float32'
+                    )

                     weight_attr1 = paddle.framework.ParamAttr(
                         name="linear_0.w_0"
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
index 650658d77e5..97460b54aa3 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_assign_op_xpu.py
@@ -67,10 +67,6 @@ class XPUTestAssignOP(XPUOpTestWrapper):
         def init_config(self):
             self.input_shape = [2, 768]

-    class XPUTestAssign2(TestAssignOPBase):
-        def init_config(self):
-            self.input_shape = [3, 8, 4096]
-
     class XPUTestAssign3(TestAssignOPBase):
         def init_config(self):
             self.input_shape = [1024]
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py
index 3ee0469b614..57b9fb25f2e 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py
@@ -202,17 +202,17 @@ class XPUTestBatchNormOp(XPUOpTestWrapper):
         def test_infer(self):
             paddle.enable_static()
             with paddle.static.program_guard(paddle.static.Program()):
-                x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
-                scale = paddle.fluid.data(
+                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
+                scale = paddle.static.data(
                     'Scale', self.scale_np.shape, self.scale_np.dtype
                 )
-                bias = paddle.fluid.data(
+                bias = paddle.static.data(
                     'Bias', self.bias_np.shape, self.bias_np.dtype
                 )
-                mean = paddle.fluid.data(
+                mean = paddle.static.data(
                     'Mean', self.mean_np.shape, self.mean_np.dtype
                 )
-                variance = paddle.fluid.data(
+                variance = paddle.static.data(
                     'Variance', self.variance_np.shape, self.variance_np.dtype
                 )
                 y = F.batch_norm(
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py
index d4166b9e0da..dc8e996e093 100755
--- a/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_bilinear_interp_op_xpu.py
@@ -466,12 +466,12 @@ class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
                  "core is not compiled with XPU")
 class TestBilinearInterpOpAPI(unittest.TestCase):
     def test_case(self):
-        x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
+        x = paddle.static.data(name="x", shape=[2, 3, 6, 6], dtype="float32")

-        dim = fluid.data(name="dim", shape=[1], dtype="int32")
-        shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
-        actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
-        scale_tensor = fluid.data(
+        dim = paddle.static.data(name="dim", shape=[1], dtype="int32")
+        shape_tensor = paddle.static.data(name="shape_tensor", shape=[2], dtype="int32")
+        actual_size = paddle.static.data(name="actual_size", shape=[2], dtype="int32")
+        scale_tensor = paddle.static.data(
             name="scale_tensor", shape=[1], dtype="float32")

         out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12])
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py
index 4bf88d40b7a..5aa2d78d343 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_clip_op_xpu.py
@@ -155,9 +155,11 @@ class TestClipAPI(unittest.TestCase):
         paddle.enable_static()
         data_shape = [1, 9, 9, 4]
         data = np.random.random(data_shape).astype('float32')
-        images = fluid.data(name='image', shape=data_shape, dtype='float32')
-        min = fluid.data(name='min', shape=[1], dtype='float32')
-        max = fluid.data(name='max', shape=[1], dtype='float32')
+        images = paddle.static.data(
+            name='image', shape=data_shape, dtype='float32'
+        )
+        min = paddle.static.data(name='min', shape=[1], dtype='float32')
+        max = paddle.static.data(name='max', shape=[1], dtype='float32')

         place = (
             fluid.XPUPlace(0)
@@ -221,8 +223,8 @@ class TestClipAPI(unittest.TestCase):

     def test_errors(self):
         paddle.enable_static()
-        x1 = fluid.data(name='x1', shape=[1], dtype="int16")
-        x2 = fluid.data(name='x2', shape=[1], dtype="int8")
+        x1 = paddle.static.data(name='x1', shape=[1], dtype="int16")
+        x2 = paddle.static.data(name='x2', shape=[1], dtype="int8")
         self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
         self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
         paddle.disable_static()
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py
index 04561ce3ed9..968623157cb 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_diagonal_op_xpu.py
@@ -137,7 +137,7 @@ class TestDiagonalAPI(unittest.TestCase):
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
-            x = paddle.fluid.data('X', self.shape)
+            x = paddle.static.data('X', self.shape)
             out = paddle.diagonal(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x}, fetch_list=[out])
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py
index fbe68be4032..5e2fbebf227 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py
@@ -265,8 +265,8 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper):
     class TestAddOp(unittest.TestCase):
         def test_name(self):
             with fluid.program_guard(fluid.Program()):
-                x = fluid.data(name="x", shape=[2, 3], dtype="float32")
-                y = fluid.data(name='y', shape=[2, 3], dtype='float32')
+                x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
+                y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

                 y_1 = paddle.add(x, y, name='add_res')
                 self.assertEqual(('add_res' in y_1.name), True)
@@ -280,8 +280,8 @@ class XPUTestElementwiseAddOp(XPUOpTestWrapper):
                     "y": np.array([1, 5, 2]).astype('float32'),
                 }

-                x = fluid.data(name="x", shape=[3], dtype='float32')
-                y = fluid.data(name="y", shape=[3], dtype='float32')
+                x = paddle.static.data(name="x", shape=[3], dtype='float32')
+                y = paddle.static.data(name="y", shape=[3], dtype='float32')
                 z = paddle.add(x, y)

                 place = fluid.XPUPlace(0)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py
index e1306610401..7ddc852b5e7 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu_kp.py
@@ -312,8 +312,8 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
 class TestAddOp(unittest.TestCase):
     def test_name(self):
         with fluid.program_guard(fluid.Program()):
-            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
-            y = fluid.data(name='y', shape=[2, 3], dtype='float32')
+            x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
+            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.add(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)
@@ -327,8 +327,8 @@ class TestAddOp(unittest.TestCase):
                 "y": np.array([1, 5, 2]).astype('float32'),
             }

-            x = fluid.data(name="x", shape=[3], dtype='float32')
-            y = fluid.data(name="y", shape=[3], dtype='float32')
+            x = paddle.static.data(name="x", shape=[3], dtype='float32')
+            y = paddle.static.data(name="y", shape=[3], dtype='float32')
             z = paddle.add(x, y)

             place = fluid.XPUPlace(0)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py
index 10b8314b85a..ef1936d2de4 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py
@@ -255,7 +255,7 @@ class XPUTestElementwiseDivOp(XPUOpTestWrapper):
     class TestElementwiseDivBroadcast(unittest.TestCase):
         def test_shape_with_batch_sizes(self):
             with fluid.program_guard(fluid.Program()):
-                x_var = fluid.data(
+                x_var = paddle.static.data(
                     name='x', dtype='float32', shape=[None, 3, None, None]
                 )
                 one = 2.0
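One behavioral detail worth noting for call sites above that omit `dtype` (e.g. `paddle.static.data(name="data", shape=shape)` in the adamw test): `fluid.data` hard-coded the default to `'float32'`, while `paddle.static.data` defaults `dtype` to `None` and then falls back to `paddle.get_default_dtype()`. The two agree as long as the global default dtype is left at `'float32'`. A small sketch of that assumption:

    import paddle

    paddle.enable_static()
    # With dtype omitted, the placeholder picks up the framework-wide
    # default dtype, which is 'float32' unless the user has changed it
    # via paddle.set_default_dtype().
    assert paddle.get_default_dtype() == 'float32'
    x = paddle.static.data(name='x', shape=[2, 3])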
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
index 7679baf1950..e6deee478de 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_gaussian_random_op_xpu.py
@@ -185,11 +185,11 @@ class TestGaussianRandomAPI(unittest.TestCase):
         positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2000)

         positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 500)
-        shape_tensor_int32 = fluid.data(
+        shape_tensor_int32 = paddle.static.data(
             name="shape_tensor_int32", shape=[2], dtype="int32"
         )

-        shape_tensor_int64 = fluid.data(
+        shape_tensor_int64 = paddle.static.data(
             name="shape_tensor_int64", shape=[2], dtype="int64"
         )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py
index 15d8e4abbd0..a78f84015de 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_index_sample_op_xpu.py
@@ -125,8 +125,8 @@ class TestIndexSampleShape(unittest.TestCase):
             low=0, high=x_shape[1], size=index_shape
         ).astype(index_type)

-        x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
-        index = fluid.data(name='index', shape=[-1, 3], dtype='int32')
+        x = paddle.static.data(name='x', shape=[-1, 5], dtype='float32')
+        index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32')
         output = paddle.index_sample(x=x, index=index)

         place = fluid.XPUPlace(0)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py
index 861103061b1..f77c3fd2303 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py
@@ -121,8 +121,8 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
             self.run_kl_loss('none')

         def test_kl_loss_static_api(self):
-            input = paddle.fluid.data(name='input', shape=[5, 20])
-            label = paddle.fluid.data(name='label', shape=[5, 20])
+            input = paddle.static.data(name='input', shape=[5, 20])
+            label = paddle.static.data(name='label', shape=[5, 20])

             paddle.nn.functional.kl_div(input, label)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
index 64d52d077fe..d526dae396d 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_masked_select_op_xpu.py
@@ -95,8 +95,8 @@ class TestMaskedSelectAPI(unittest.TestCase):
     def test_static_mode(self):
         shape = [8, 9, 6]
-        x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-        mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
+        x = paddle.static.data(shape=shape, dtype='float32', name='x')
+        mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
         np_x = np.random.random(shape).astype('float32')
         np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
@@ -120,9 +120,9 @@ class TestMaskedSelectError(unittest.TestCase):
         ):
             shape = [8, 9, 6]
-            x = paddle.fluid.data(shape=shape, dtype='float32', name='x')
-            mask = paddle.fluid.data(shape=shape, dtype='bool', name='mask')
-            mask_float = paddle.fluid.data(
+            x = paddle.static.data(shape=shape, dtype='float32', name='x')
+            mask = paddle.static.data(shape=shape, dtype='bool', name='mask')
+            mask_float = paddle.static.data(
                 shape=shape, dtype='float32', name='mask_float'
             )
             np_x = np.random.random(shape).astype('float32')
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
index c04cc72be4d..a89d7ae810e 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py
@@ -142,9 +142,11 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
     class API_TestMm(unittest.TestCase):
         def test_out(self):
             with fluid.program_guard(fluid.Program()):
-                x = fluid.data(name="x", shape=[2], dtype=self.in_type)
-                y = fluid.data(name='y', shape=[2], dtype=self.in_type)
-                res = fluid.data(name="output", shape=[1], dtype=self.in_type)
+                x = paddle.static.data(name="x", shape=[2], dtype=self.in_type)
+                y = paddle.static.data(name='y', shape=[2], dtype=self.in_type)
+                res = paddle.static.data(
+                    name="output", shape=[1], dtype=self.in_type
+                )
                 result = paddle.mm(x, y)
                 exe = fluid.Executor(fluid.XPUPlace(0))
                 data1 = np.random.rand(2).astype(self.in_type)
@@ -193,10 +195,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):
         def test_errors(self):
             def test_error1():
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
-                    data1 = fluid.data(
+                    data1 = paddle.static.data(
                         name="data1", shape=[10, 2], dtype="float32"
                     )
-                    data2 = fluid.data(
+                    data2 = paddle.static.data(
                         name="data2", shape=[3, 10], dtype="float32"
                     )
                     paddle.mm(data1, data2)
@@ -205,10 +207,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):

             def test_error2():
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
-                    data1 = fluid.data(
+                    data1 = paddle.static.data(
                         name="data1", shape=[-1, 10, 2], dtype="float32"
                     )
-                    data2 = fluid.data(
+                    data2 = paddle.static.data(
                         name="data2", shape=[-1, 2, 10], dtype="float32"
                     )
                     paddle.mm(data1, data2)
@@ -217,10 +219,10 @@ class XPUTestMatmulOpErr(XPUOpTestWrapper):

             def test_error3():
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
-                    data1 = fluid.data(
+                    data1 = paddle.static.data(
                         name="data1", shape=[10, 10, 2], dtype="float32"
                     )
-                    data2 = fluid.data(
+                    data2 = paddle.static.data(
                         name="data2", shape=[3, 2, 10], dtype="float32"
                     )
                     paddle.mm(data1, data2)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py
index 42ea4032e1f..441439838cb 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_nearest_interp_op_xpu.py
@@ -404,7 +404,7 @@ class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
                  "core is not compiled with XPU")
 class TestNearestInterpException(unittest.TestCase):
     def test_exception(self):
-        input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32")
+        input = paddle.static.data(name="input", shape=[1, 3, 6, 6], dtype="float32")

         def attr_data_format():
             # for 4-D input, data_format can only be NCHW or NHWC
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py
index 73af3050867..9425f701590 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_pad3d_op_xpu.py
@@ -188,8 +188,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper):
             mode = "constant"
             value = 100
             input_data = np.random.rand(*input_shape).astype(self.dtype)
-            x = paddle.fluid.data(
-                name="x", shape=input_shape, dtype=self.dtype
+            x = paddle.static.data(
+                name="x", shape=input_shape, dtype="float32"
             )
             result = F.pad(
                 x=x, pad=pad, value=value, mode=mode, data_format="NCDHW"
@@ -211,8 +211,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper):
             pad = [1, 2, 1, 1, 1, 2]
             mode = "reflect"
             input_data = np.random.rand(*input_shape).astype(self.dtype)
-            x = paddle.fluid.data(
-                name="x", shape=input_shape, dtype=self.dtype
+            x = paddle.static.data(
+                name="x", shape=input_shape, dtype="float32"
             )
             result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
             result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
@@ -239,8 +239,8 @@ class XPUTestPad3dOp(XPUOpTestWrapper):
             pad = [1, 2, 1, 1, 3, 4]
             mode = "replicate"
             input_data = np.random.rand(*input_shape).astype(self.dtype)
-            x = paddle.fluid.data(
-                name="x", shape=input_shape, dtype=self.dtype
+            x = paddle.static.data(
+                name="x", shape=input_shape, dtype="float32"
             )
             result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
             result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py
index 666c29f7fca..043d5436d9f 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_prelu_op_xpu.py
@@ -184,7 +184,7 @@ class TestModeError(unittest.TestCase):
     def test_mode_error(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = prelu_t(x, 'any')
             except Exception as e:
@@ -193,7 +193,7 @@ class TestModeError(unittest.TestCase):
     def test_data_format_error1(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = prelu_t(x, 'channel', data_format='N')
             except Exception as e:
@@ -202,7 +202,7 @@ class TestModeError(unittest.TestCase):
     def test_data_format_error2(self):
         main_program = Program()
         with fluid.program_guard(main_program, Program()):
-            x = fluid.data(name='x', shape=[2, 3, 4, 5])
+            x = paddle.static.data(name='x', shape=[2, 3, 4, 5])
             try:
                 y = paddle.static.nn.prelu(x, 'channel', data_format='N')
             except ValueError as e:
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py
index 369cfb4ac96..1fb907f9f09 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_prod_op_xpu.py
@@ -74,7 +74,7 @@ class TestProdOp(unittest.TestCase):
         )

     def run_static(self):
-        input = paddle.fluid.data(
+        input = paddle.static.data(
             name='input', shape=[10, 10, 5], dtype='float32'
         )
         result0 = paddle.prod(input)
@@ -146,8 +146,8 @@ class TestProdOpError(unittest.TestCase):
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
-            x = paddle.fluid.data(name='x', shape=[2, 2, 4], dtype='float32')
-            bool_x = paddle.fluid.data(
+            x = paddle.static.data(name='x', shape=[2, 2, 4], dtype='float32')
+            bool_x = paddle.static.data(
                 name='bool_x', shape=[2, 2, 4], dtype='bool'
             )
             # The argument x shoule be a Tensor
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
index 285d7cbfb00..99d7091ca41 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
@@ -135,14 +135,14 @@ class TestRaiseSumError(unittest.TestCase):
         self.assertRaises(TypeError, test_type)

         def test_dtype():
-            data1 = fluid.data(name="input1", shape=[10], dtype="int8")
-            data2 = fluid.data(name="input2", shape=[10], dtype="int8")
+            data1 = paddle.static.data(name="input1", shape=[10], dtype="int8")
+            data2 = paddle.static.data(name="input2", shape=[10], dtype="int8")
             paddle.add_n([data1, data2])

         self.assertRaises(TypeError, test_dtype)

         def test_dtype1():
-            data1 = fluid.data(name="input1", shape=[10], dtype="int8")
+            data1 = paddle.static.data(name="input1", shape=[10], dtype="int8")
             paddle.add_n(data1)

         self.assertRaises(TypeError, test_dtype1)
@@ -156,30 +156,38 @@ class TestRaiseSumsError(unittest.TestCase):
         self.assertRaises(TypeError, test_type)

         def test_dtype():
-            data1 = fluid.data(name="input1", shape=[10], dtype="int8")
-            data2 = fluid.data(name="input2", shape=[10], dtype="int8")
+            data1 = paddle.static.data(name="input1", shape=[10], dtype="int8")
+            data2 = paddle.static.data(name="input2", shape=[10], dtype="int8")
             paddle.add_n([data1, data2])

         self.assertRaises(TypeError, test_dtype)

         def test_dtype1():
-            data1 = fluid.data(name="input1", shape=[10], dtype="int8")
+            data1 = paddle.static.data(name="input1", shape=[10], dtype="int8")
             paddle.add_n(data1)

         self.assertRaises(TypeError, test_dtype1)

         def test_out_type():
-            data1 = fluid.data(name="input1", shape=[10], dtype="flaot32")
-            data2 = fluid.data(name="input2", shape=[10], dtype="float32")
+            data1 = paddle.static.data(
+                name="input1", shape=[10], dtype="flaot32"
+            )
+            data2 = paddle.static.data(
+                name="input2", shape=[10], dtype="float32"
+            )
             out = [10]
             out = paddle.add_n([data1, data2])

         self.assertRaises(TypeError, test_out_type)

         def test_out_dtype():
-            data1 = fluid.data(name="input1", shape=[10], dtype="flaot32")
-            data2 = fluid.data(name="input2", shape=[10], dtype="float32")
-            out = fluid.data(name="out", shape=[10], dtype="int8")
+            data1 = paddle.static.data(
+                name="input1", shape=[10], dtype="flaot32"
+            )
+            data2 = paddle.static.data(
+                name="input2", shape=[10], dtype="float32"
+            )
+            out = paddle.static.data(name="out", shape=[10], dtype="int8")
             out = paddle.add_n([data1, data2])

         self.assertRaises(TypeError, test_out_dtype)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py
index 85dbdeeadea..d66e75c5238 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_tril_triu_op_xpu.py
@@ -25,7 +25,6 @@ from xpu.get_test_cover_info import (
 )

 import paddle
-import paddle.fluid as fluid
 import paddle.tensor as tensor

 paddle.enable_static()
@@ -136,7 +135,7 @@ class XPUTestTrilTriuOp(XPUOpTestWrapper):
 class TestTrilTriuOpError(unittest.TestCase):
     def test_errors1(self):
         paddle.enable_static()
-        data = fluid.data(shape=(20, 22), dtype='float32', name="data1")
+        data = paddle.static.data(shape=(20, 22), dtype='float32', name="data1")
         op_type = np.random.choice(['triu', 'tril'])
         errmsg = {
             "diagonal: TypeError": "diagonal in {} must be a python Int".format(
@@ -151,7 +150,7 @@ class TestTrilTriuOpError(unittest.TestCase):
     def test_errors2(self):
         paddle.enable_static()
-        data = fluid.data(shape=(200,), dtype='float32', name="data2")
+        data = paddle.static.data(shape=(200,), dtype='float32', name="data2")
         op_type = np.random.choice(['triu', 'tril'])
         errmsg = {
             "input: ValueError": "x shape in {} must be at least 2-D".format(
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py
index 74592c10245..49c72b67195 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_unbind_op_xpu.py
@@ -42,10 +42,10 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
         def test_unbind(self):
             self.dtype = self.in_type
             self.place = paddle.XPUPlace(0)
-            x_1 = fluid.data(shape=[2, 3], dtype=self.dtype, name='x_1')
+            x_1 = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x_1')
             [out_0, out_1] = tensor.unbind(input=x_1, axis=0)
             input_1 = np.random.random([2, 3]).astype(self.dtype)
-            axis = fluid.data(shape=[1], dtype='int32', name='axis')
+            axis = paddle.static.data(shape=[1], dtype='int32', name='axis')
             exe = fluid.Executor(place=self.place)

             [res_1, res_2] = exe.run(
@@ -82,10 +82,10 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
         def test_layers_unbind(self):
             self.dtype = self.in_type
             self.place = paddle.XPUPlace(0)
-            x_1 = fluid.data(shape=[2, 3], dtype=self.dtype, name='x_1')
+            x_1 = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x_1')
             [out_0, out_1] = paddle.unbind(input=x_1, axis=0)
             input_1 = np.random.random([2, 3]).astype(self.dtype)
-            axis = fluid.data(shape=[1], dtype='int32', name='axis')
+            axis = paddle.static.data(shape=[1], dtype='int32', name='axis')
             exe = fluid.Executor(place=self.place)

             [res_1, res_2] = exe.run(
@@ -194,7 +194,7 @@ class XPUTestUnbindOP(XPUOpTestWrapper):
             with program_guard(Program(), Program()):
                 self.dtype = self.in_type
                 self.place = paddle.XPUPlace(0)
-                x = fluid.data(shape=[2, 3], dtype=self.dtype, name='x')
+                x = paddle.static.data(shape=[2, 3], dtype=self.dtype, name='x')

                 def test_table_Variable():
                     tensor.unbind(input=x, axis=2.0)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
index 44d79efc97b..e8a1faff332 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
@@ -111,17 +111,21 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper):
     class TestUpdateLossScalingLayer(unittest.TestCase):
         def loss_scaling_check(self, scope=fluid.Scope()):
-            a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
-            b = fluid.data(name="b", shape=[512, 128], dtype='float32')
+            a = paddle.static.data(
+                name="a", shape=[1024, 1024], dtype='float32'
+            )
+            b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
             x = [a, b]
-            found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
-            prev_loss_scaling = fluid.data(
+            found_inf = paddle.static.data(
+                name="found_inf", shape=[1], dtype='bool'
+            )
+            prev_loss_scaling = paddle.static.data(
                 name="prev_loss_scaling", shape=[1], dtype='float32'
             )
-            num_good_steps = fluid.data(
+            num_good_steps = paddle.static.data(
                 name="num_good_steps", shape=[1], dtype='int32'
             )
-            num_bad_steps = fluid.data(
+            num_bad_steps = paddle.static.data(
                 name="num_bad_steps", shape=[1], dtype='int32'
             )
@@ -182,17 +186,21 @@ class XPUTestUpdateLossScalingOp(XPUOpTestWrapper):
             assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))

         def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
-            a = fluid.data(name="a", shape=[1024, 1024], dtype='float32')
-            b = fluid.data(name="b", shape=[512, 128], dtype='float32')
+            a = paddle.static.data(
+                name="a", shape=[1024, 1024], dtype='float32'
+            )
+            b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
             x = [a, b]
-            found_inf = fluid.data(name="found_inf", shape=[1], dtype='bool')
-            prev_loss_scaling = fluid.data(
+            found_inf = paddle.static.data(
+                name="found_inf", shape=[1], dtype='bool'
+            )
+            prev_loss_scaling = paddle.static.data(
                 name="prev_loss_scaling", shape=[1], dtype='float32'
             )
-            num_good_steps = fluid.data(
+            num_good_steps = paddle.static.data(
                 name="num_good_steps", shape=[1], dtype='int32'
             )
-            num_bad_steps = fluid.data(
+            num_bad_steps = paddle.static.data(
                 name="num_bad_steps", shape=[1], dtype='int32'
             )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py
index 1a5866cc3d1..3cda4eaac18 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_warpctc_op_xpu.py
@@ -27,7 +27,6 @@ from xpu.get_test_cover_info import (
 )

 import paddle
-import paddle.fluid as fluid
 import paddle.nn.functional as F
 from paddle.fluid import Program, program_guard
@@ -341,14 +340,16 @@ class XPUTestWarpCTCOp(XPUOpTestWrapper):
         self.dtype = self.in_type
         self.place = paddle.XPUPlace(0)
         with program_guard(Program(), Program()):
-            logits = fluid.data(
+            logits = paddle.static.data(
                 name='logits', shape=[5, 16, 6], dtype=self.dtype
             )
-            logits_length = fluid.data(
+            logits_length = paddle.static.data(
                 name='logits_length', shape=[None], dtype='int64'
             )
-            label = fluid.data(name='label', shape=[16, 3], dtype='int32')
-            label_length = fluid.data(
+            label = paddle.static.data(
+                name='label', shape=[16, 3], dtype='int32'
+            )
+            label_length = paddle.static.data(
                 name='labels_length', shape=[None], dtype='int64'
             )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
index 77b0e3d202e..06316d5a4cd 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
@@ -106,11 +106,15 @@ class TestXPUWhereAPI(unittest.TestCase):
         train_prog = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(train_prog, startup):
-            cond = fluid.data(
+            cond = paddle.static.data(
                 name='cond', shape=self.shape, dtype='bool'
             )
-            x = fluid.data(name='x', shape=self.shape, dtype='float32')
-            y = fluid.data(name='y', shape=self.shape, dtype='float32')
+            x = paddle.static.data(
+                name='x', shape=self.shape, dtype='float32'
+            )
+            y = paddle.static.data(
+                name='y', shape=self.shape, dtype='float32'
+            )
             x.stop_gradient = x_stop_gradient
             y.stop_gradient = y_stop_gradient
diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py
index e6bdf04f137..dc47184f651 100644
--- a/python/paddle/jit/dy2static/program_translator.py
+++ b/python/paddle/jit/dy2static/program_translator.py
@@ -993,7 +993,7 @@ class ConcreteProgram:

         with framework.program_guard(main_program, startup_program):
             with _switch_declarative_mode_guard_(is_declarative=True):
-                # 1. Adds `fluid.data` layers for input if needed
+                # 1. Adds `paddle.static.data` layers for input if needed
                 static_inputs = func_spec.to_static_inputs_with_spec(
                     input_spec, main_program
                 )
diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py
index 4b7e6215b2d..d0274b4eb91 100644
--- a/python/paddle/nn/clip.py
+++ b/python/paddle/nn/clip.py
@@ -969,7 +969,7 @@ def set_gradient_clip(clip, param_list=None, program=None):
             paddle.enable_static()

             def network():
-                image = fluid.data(name='image', shape=[
+                image = paddle.static.data(name='image', shape=[
                     None, 28], dtype='float32')
                 param_attr1 = fluid.ParamAttr("fc1_param")
                 fc1 = fluid.layers.fc(image, size=10, param_attr=param_attr1)
diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py
index 61e474c857f..5902c88130e 100644
--- a/python/paddle/reader/decorator.py
+++ b/python/paddle/reader/decorator.py
@@ -527,7 +527,7 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000):
     Example:

     .. code-block:: python
-
+            import paddle
             import paddle.fluid as fluid
             from paddle.fluid.io import multiprocess_reader
             import numpy as np
@@ -556,7 +556,7 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             place = fluid.CPUPlace()
             # the 1st 2 is batch size
-            image = fluid.data(name='image', dtype='int64', shape=[2, 1, 2])
+            image = paddle.static.data(name='image', dtype='int64', shape=[2, 1, 2])
             fluid.layers.Print(image)
             # print detailed tensor info of image variable
diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py
index 3227dc6c67d..45143b4ad95 100644
--- a/python/paddle/static/io.py
+++ b/python/paddle/static/io.py
@@ -1721,7 +1721,7 @@ def get_program_persistable_vars(program):
             import paddle.static.io as io
             import paddle.fluid as fluid
             paddle.enable_static()
-            data = fluid.data(name="img", shape=[64, 784])
+            data = paddle.static.data(name="img", shape=[64, 784])
             w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
             b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b')
             list_para = io.get_program_persistable_vars( fluid.default_main_program() )
diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index c79c9553c2f..679bf3d8118 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -92,7 +92,7 @@ def shape(input):

             import paddle
             paddle.enable_static()

-            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
+            inputs = paddle.static.data(name="x", shape=[3, 100, 100], dtype="float32")
             output = paddle.shape(inputs)

             exe = fluid.Executor(fluid.CPUPlace())
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index e3fc96f3854..a556a66fc15 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -258,7 +258,7 @@ def uniform_random_batch_size_like(
             from paddle.tensor import random
             paddle.enable_static()

             # example 1:
-            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
+            input = paddle.static.data(name="input", shape=[1, 3], dtype='float32')
             out_1 = random.uniform_random_batch_size_like(input, [2, 4])
             # out_1.shape=[1, 4]

             # example 2:
             out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1)
             # out_2.shape=[2, 3]
diff --git a/python/paddle/tests/test_metrics.py b/python/paddle/tests/test_metrics.py
index c604c7088e9..af668217d50 100644
--- a/python/paddle/tests/test_metrics.py
+++ b/python/paddle/tests/test_metrics.py
@@ -205,10 +205,12 @@ class TestAccuracyStatic(TestAccuracyDynamic):
         main_prog.random_seed = 1024
         startup_prog.random_seed = 1024
         with fluid.program_guard(main_prog, startup_prog):
-            pred = fluid.data(
+            pred = paddle.static.data(
                 name='pred', shape=[None, self.class_num], dtype='float32'
             )
-            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            label = paddle.static.data(
+                name='label', shape=[None, 1], dtype='int64'
+            )
             acc = paddle.metric.Accuracy(topk=self.topk, name=self.name)
             state = acc.compute(pred, label)
--
GitLab
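The error-path tests above keep their shape under the rewrite: declare a placeholder with a dtype the API rejects, then assert the `TypeError`. A condensed sketch of that pattern, mirroring the `trunc` and `unique` error tests in this patch (the unittest scaffolding is omitted):

    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        # trunc rejects bool inputs, so building the op must raise,
        # exactly as TestTruncAPI.test_errors asserts.
        x = paddle.static.data(name='x', shape=[20, 20], dtype='bool')
        try:
            paddle.trunc(x)
        except TypeError:
            print('unsupported dtype rejected, as the tests expect')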