From 5670644ca8ae320a68afd9eb73300508c5b61615 Mon Sep 17 00:00:00 2001
From: GGBond8488 <33050871+GGBond8488@users.noreply.github.com>
Date: Fri, 20 Jan 2023 09:00:20 +0800
Subject: [PATCH] Fluid clean remove io data (#49301)

* replace paddle.fluid.layers.data and remove io.data (the migration
  pattern is sketched at the end of this patch)
* partial commit
* partial commit
* partial commit
* partial commit
* partial commit
* partial commit
* partial commit
* remove data in fluid.layers.io.__all__
* fix errors
* fix unittests
* fix unittest
* fix unittests
* fix unittest
* fix unittest
* fix unittests
* fix unittest
* fix test_layers unittests
* fix typo
* fix unittest
* fix unittest
* fix unittest
* fix typo
* fix unittest test_model_cast_to_bf16
* fix test_reducescatter
* fix collective unittest
* fix collective unittests
* fix collective unittests
* add coverage
* re-add layers.data
* re-run CI
* fix some typos
* fix sample code errors
* fix sample code errors
---
 python/paddle/fluid/contrib/layers/nn.py | 41 +--
 .../fluid/contrib/tests/test_correlation.py | 12 +-
 .../tests/test_image_classification_fp16.py | 14 +-
 .../contrib/tests/test_model_cast_to_bf16.py | 30 +-
 .../tests/test_multi_precision_fp16_train.py | 14 +-
 .../contrib/tests/test_weight_decay_extend.py | 16 +-
 python/paddle/fluid/executor.py | 1 -
 python/paddle/fluid/framework.py | 3 +-
 .../incubate/fleet/tests/fleet_deep_ctr.py | 9 +-
 .../fluid/incubate/fleet/utils/fleet_util.py | 8 +-
 .../fluid/incubate/fleet/utils/utils.py | 6 +-
 python/paddle/fluid/install_check.py | 6 +-
 python/paddle/fluid/io.py | 6 +-
 python/paddle/fluid/layers/control_flow.py | 2 +-
 python/paddle/fluid/layers/io.py | 108 +------
 python/paddle/fluid/layers/nn.py | 12 +-
 python/paddle/fluid/optimizer.py | 55 ++--
 python/paddle/fluid/reader.py | 2 +-
 python/paddle/fluid/regularizer.py | 8 +-
 .../tests/book/notest_understand_sentiment.py | 6 +-
 .../fluid/tests/book/test_fit_a_line.py | 6 +-
 .../tests/book/test_image_classification.py | 6 +-
 .../fluid/tests/book/test_recognize_digits.py | 4 +-
 .../tests/book/test_recommender_system.py | 22 +-
 .../fluid/tests/book/test_word2vec_book.py | 12 +-
 python/paddle/fluid/tests/test_data_feeder.py | 16 +-
 python/paddle/fluid/tests/test_detection.py | 7 +-
 python/paddle/fluid/tests/test_error_clip.py | 4 +-
 .../tests/unittests/check_nan_inf_base.py | 4 +-
 .../collective/collective_allgather_api.py | 5 +-
 .../collective/collective_allreduce_api.py | 5 +-
 .../collective_allreduce_new_group_api.py | 5 +-
 .../collective/collective_allreduce_op.py | 6 +-
 .../collective_allreduce_op_wait.py | 6 +-
 .../collective/collective_alltoall_api.py | 6 +-
 .../collective/collective_broadcast_api.py | 6 +-
 .../collective/collective_broadcast_op.py | 6 +-
 .../collective/collective_concat_op.py | 6 +-
 .../collective/collective_identity_op.py | 6 +-
 .../collective/collective_reduce_api.py | 6 +-
 .../collective/collective_reduce_op.py | 6 +-
 .../collective_reduce_op_calc_stream.py | 7 +-
 .../collective/collective_scatter_api.py | 3 +-
 .../collective/collective_scatter_op.py | 6 +-
 .../collective/collective_sendrecv_api.py | 4 +-
 .../collective/collective_sendrecv_op.py | 5 +-
 .../collective_sendrecv_op_array.py | 5 +-
 .../collective_sendrecv_op_dynamic_shape.py | 7 +-
 .../collective/collective_split_op.py | 6 +-
 .../fleet/dist_mnist_gradient_merge.py | 6 +-
 .../collective/fleet/pipeline_mnist.py | 8 +-
 .../fleet/pipeline_mnist_multi_device.py | 8 +-
 .../fleet/pipeline_mnist_one_device.py | 8 +-
 .../fleet/test_communicator_half_async.py | 4 +-
 .../fleet/test_communicator_sync.py | 4 +-
 .../fleet/test_distributed_strategy.py | 4 +-
 ...est_fleet_fp16_allreduce_meta_optimizer.py | 8 +-
 ...st_fleet_graph_execution_meta_optimizer.py | 32 +-
 .../fleet/test_fleet_graph_executor.py | 8 +-
 .../fleet/test_fleet_lamb_meta_optimizer.py | 14 +-
 .../fleet/test_fleet_lars_meta_optimizer.py | 14 +-
 .../fleet/test_fleet_meta_optimizer_base.py | 8 +-
 .../test_fleet_pipeline_meta_optimizer.py | 12 +-
 ..._pipeline_meta_optimizer_with_recompute.py | 8 +-
 .../test_fleet_raw_program_meta_optimizer.py | 6 +-
 .../fleet/test_fleet_rolemaker_new.py | 4 +-
 .../unittests/collective_allgather_op.py | 6 +-
 .../unittests/collective_reducescatter.py | 6 +-
 .../unittests/collective_reducescatter_op.py | 6 +-
 .../tests/unittests/dist_allreduce_op.py | 6 +-
 .../paddle/fluid/tests/unittests/dist_ctr.py | 9 +-
 .../fluid/tests/unittests/dist_fleet_ctr.py | 9 +-
 .../dist_fleet_heter_pipeline_ctr.py | 9 +-
 .../dist_fleet_raw_program_optimizer.py | 6 +-
 ...et_raw_program_optimizer_fuse_allreduce.py | 6 +-
 .../tests/unittests/dist_fleet_simnet_bow.py | 14 +-
 .../dist_fleet_sparse_embedding_ctr.py | 9 +-
 .../fluid/tests/unittests/dist_mnist.py | 6 +-
 .../tests/unittests/dist_mnist_batch_merge.py | 6 +-
 .../unittests/dist_mnist_fp16_allreduce.py | 6 +-
 .../fluid/tests/unittests/dist_mnist_lars.py | 6 +-
 .../fluid/tests/unittests/dist_se_resnext.py | 6 +-
 .../tests/unittests/dist_sharding_save.py | 8 +-
 .../unittests/dist_text_classification.py | 10 +-
 .../fluid/tests/unittests/dist_transformer.py | 3 +-
 .../fluid/tests/unittests/dist_word2vec.py | 20 +-
 .../distribution/test_distribution_normal.py | 85 +++--
 .../distribution/test_distribution_uniform.py | 65 ++--
 .../dygraph_to_static/transformer_util.py | 3 +-
 .../unittests/fleet_heter_ps_training.py | 9 +-
 .../unittests/fleet_meta_optimizer_base.py | 16 +-
 .../tests/unittests/fleet_ps_training.py | 5 +-
 ...r_embedding_eltwise_layernorm_fuse_pass.py | 21 +-
 .../ir/test_ir_subgraph_python_interface.py | 8 +-
 .../unittests/ir_memory_optimize_net_base.py | 6 +-
 .../unittests/mlu/collective_allgather_api.py | 4 +-
 .../unittests/mlu/collective_allgather_op.py | 5 +-
 .../unittests/mlu/collective_allreduce_api.py | 4 +-
 .../unittests/mlu/collective_allreduce_op.py | 6 +-
 .../unittests/mlu/collective_broadcast_api.py | 6 +-
 .../unittests/mlu/collective_broadcast_op.py | 6 +-
 .../unittests/mlu/collective_reduce_api.py | 6 +-
 .../unittests/mlu/collective_reduce_op.py | 6 +-
 .../unittests/mlu/sync_batch_norm_op_mlu.py | 3 +-
 .../unittests/mlu/test_accuracy_op_mlu.py | 6 +-
 .../unittests/mlu/test_batch_norm_op_mlu.py | 4 +-
 .../unittests/mlu/test_compare_op_mlu.py | 10 +-
 .../mlu/test_conv2d_transposed_op_mlu.py | 14 +-
 .../unittests/mlu/test_expand_as_v2_op_mlu.py | 7 +-
 .../unittests/mlu/test_expand_v2_op_mlu.py | 11 +-
 .../mlu/test_fill_constant_op_mlu.py | 4 +-
 .../unittests/mlu/test_layer_norm_op_mlu.py | 5 +-
 .../tests/unittests/mlu/test_log_op_mlu.py | 8 +-
 .../unittests/mlu/test_momentum_op_mlu.py | 8 +-
 .../unittests/mlu/test_one_hot_v2_op_mlu.py | 9 +-
 .../tests/unittests/mlu/test_pool2d_op_mlu.py | 15 +-
 .../tests/unittests/mlu/test_split_op_mlu.py | 4 +-
 .../tests/unittests/mlu/test_stack_op_mlu.py | 6 +-
 .../mlu/test_strided_slice_op_mlu.py | 15 +-
 .../mlu/test_sync_batch_norm_op_mlu_extra.py | 2 +-
 .../tests/unittests/mlu/test_tile_op_mlu.py | 6 +-
 .../unittests/mlu/test_transpose_op_mlu.py | 2 +-
 .../unittests/mlu/test_where_index_op_mlu.py | 2 +-
 .../tests/unittests/mlu/test_where_op_mlu.py | 38 +--
 .../npu/collective_identity_op_npu.py | 4 +-
 .../unittests/npu/sync_batch_norm_op_npu.py | 5 +-
 .../tests/unittests/npu/test_atan_op_npu.py | 2 +-
 .../tests/unittests/npu/test_clip_op_npu.py | 2 +-
 .../unittests/npu/test_compare_op_npu.py | 10 +-
 .../npu/test_conv2d_transpose_op_npu.py | 8 +-
 .../tests/unittests/npu/test_conv3d_op_npu.py | 12 +-
 .../unittests/npu/test_expand_as_v2_op_npu.py | 7 +-
 .../unittests/npu/test_expand_v2_op_npu.py | 10 +-
 .../tests/unittests/npu/test_gather_op_npu.py | 4 +-
 .../unittests/npu/test_group_norm_op_npu.py | 4 +-
 .../unittests/npu/test_momentum_op_npu.py | 8 +-
 .../unittests/npu/test_one_hot_v2_op_npu.py | 2 +-
 .../tests/unittests/npu/test_sin_op_npu.py | 2 +-
 .../tests/unittests/npu/test_split_op_npu.py | 4 +-
 .../tests/unittests/npu/test_stack_op_npu.py | 6 +-
 .../npu/test_strided_slice_op_npu.py | 15 +-
 .../npu/test_sync_batch_norm_op_npu_extra.py | 2 +-
 .../unittests/npu/test_where_index_npu.py | 2 +-
 .../tests/unittests/npu/test_where_op_npu.py | 4 +-
 .../tests/unittests/npu/test_while_op_npu.py | 12 +-
 .../sequence/test_sequence_concat.py | 20 +-
 .../unittests/sequence/test_sequence_conv.py | 4 +-
 .../sequence/test_sequence_first_step.py | 4 +-
 .../sequence/test_sequence_last_step.py | 4 +-
 .../sequence/test_sequence_pad_op.py | 10 +-
 .../sequence/test_sequence_reshape.py | 7 +-
 .../sequence/test_sequence_reverse.py | 5 +-
 .../fluid/tests/unittests/seresnext_net.py | 6 +-
 .../fluid/tests/unittests/simple_nets.py | 14 +-
 .../fluid/tests/unittests/test_accuracy_op.py | 8 +-
 .../unittests/test_activation_nn_grad.py | 45 ++-
 .../tests/unittests/test_activation_op.py | 41 +--
 .../fluid/tests/unittests/test_adadelta_op.py | 4 +-
 .../fluid/tests/unittests/test_addmm_op.py | 57 +---
 .../fluid/tests/unittests/test_argsort_op.py | 12 +-
 .../unittests/test_array_read_write_op.py | 6 +-
 .../fluid/tests/unittests/test_assign_op.py | 5 +-
 .../test_async_ssa_graph_executor_mnist.py | 6 +-
 .../unittests/test_auto_parallel_mapper.py | 6 +-
 .../tests/unittests/test_batch_norm_op.py | 8 +-
 .../fluid/tests/unittests/test_bmm_op.py | 4 +-
 .../fluid/tests/unittests/test_boxps.py | 8 +-
 .../unittests/test_broadcast_tensors_op.py | 32 +-
 .../tests/unittests/test_broadcast_to_op.py | 11 +-
 .../fluid/tests/unittests/test_cast_op.py | 5 +-
 .../unittests/test_communicator_async.py | 4 +-
 .../tests/unittests/test_communicator_geo.py | 9 +-
 .../unittests/test_communicator_ps_gpu.py | 4 +-
 .../fluid/tests/unittests/test_compare_op.py | 12 +-
 .../tests/unittests/test_compiled_program.py | 6 +-
 .../fluid/tests/unittests/test_concat_op.py | 27 +-
 .../tests/unittests/test_conditional_block.py | 2 +-
 .../fluid/tests/unittests/test_conv2d_api.py | 18 +-
 .../fluid/tests/unittests/test_conv2d_op.py | 4 +-
 .../unittests/test_conv2d_transpose_op.py | 16 +-
 .../fluid/tests/unittests/test_conv3d_op.py | 13 +-
 .../test_conv3d_transpose_part2_op.py | 12 +-
 .../tests/unittests/test_conv_nn_grad.py | 41 ++-
 .../unittests/test_conv_transpose_nn_grad.py | 11 +-
 .../tests/unittests/test_cross_entropy_op.py | 8 +-
 .../fluid/tests/unittests/test_cross_op.py | 12 +-
 .../paddle/fluid/tests/unittests/test_data.py | 24 +-
 .../tests/unittests/test_data_norm_op.py | 3 +-
 .../fluid/tests/unittests/test_dataset.py | 72 ++---
 .../test_dataset_consistency_inspection.py | 40 ++-
 .../unittests/test_dataset_dataloader.py | 8 +-
 .../unittests/test_decoupled_py_reader.py | 8 +-
 .../test_decoupled_py_reader_data_check.py | 8 +-
 .../fluid/tests/unittests/test_desc_clone.py | 24 +-
 .../test_dist_fleet_a_sync_optimizer_async.py | 8 +-
 .../test_dist_fleet_a_sync_optimizer_auto.py | 6 +-
 ..._dist_fleet_a_sync_optimizer_auto_async.py | 5 +-
 ...st_dist_fleet_a_sync_optimizer_auto_geo.py | 4 +-
 .../test_dist_fleet_a_sync_optimizer_geo.py | 12 +-
 .../test_dist_fleet_a_sync_optimizer_sync.py | 4 +-
 .../test_dist_fleet_heter_program.py | 10 +-
 .../unittests/test_dist_fleet_minimize.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps11.py | 18 +-
 .../tests/unittests/test_dist_fleet_ps12.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps13.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps2.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps3.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps4.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps5.py | 14 +-
 .../tests/unittests/test_dist_fleet_ps6.py | 14 +-
 .../test_dist_fleet_sparse_embedding_ctr.py | 9 +-
 .../tests/unittests/test_dist_fleet_spmt.py | 14 +-
 .../test_dist_fleet_trainer_desc_config.py | 4 +-
 .../unittests/test_dist_mnist_fleetapi.py | 2 +-
 .../fluid/tests/unittests/test_dist_train.py | 18 +-
 .../tests/unittests/test_dist_transpiler.py | 84 ++---
 .../tests/unittests/test_dist_tree_index.py | 19 +-
 .../fluid/tests/unittests/test_dot_op.py | 18 +-
 .../fluid/tests/unittests/test_downpoursgd.py | 12 +-
 .../fluid/tests/unittests/test_dropout_op.py | 4 +-
 .../unittests/test_dygraph_multi_forward.py | 8 +-
 .../test_eager_deletion_delete_vars.py | 4 +-
 .../test_eager_deletion_dynamic_rnn_base.py | 6 +-
 .../test_eager_deletion_padding_rnn.py | 20 +-
 .../test_eager_deletion_recurrent_op.py | 36 +--
 .../unittests/test_eager_deletion_while_op.py | 12 +-
 .../unittests/test_elementwise_nn_grad.py | 49 ++-
 .../fluid/tests/unittests/test_entry_attr.py | 8 +-
 .../fluid/tests/unittests/test_entry_attr2.py | 8 +-
 .../fluid/tests/unittests/test_exception.py | 4 +-
 .../tests/unittests/test_executor_and_mul.py | 9 +-
 .../test_executor_and_use_program_cache.py | 11 +-
 .../tests/unittests/test_expand_as_v2_op.py | 13 +-
 .../tests/unittests/test_expand_v2_op.py | 16 +-
 .../fluid/tests/unittests/test_fc_op.py | 7 +-
 .../test_feed_data_check_shape_type.py | 2 +-
 .../tests/unittests/test_fetch_unmerged.py | 8 +-
 .../tests/unittests/test_fill_constant_op.py | 4 +-
 .../fluid/tests/unittests/test_fleet.py | 6 +-
 .../tests/unittests/test_fleet_api_input.py | 4 +-
 .../fluid/tests/unittests/test_fleet_auto.py | 6 +-
 .../tests/unittests/test_fleet_base_2.py | 10 +-
 .../tests/unittests/test_fleet_base_3.py | 12 +-
 .../tests/unittests/test_fleet_executor.py | 10 +-
 .../test_fleet_executor_multi_devices.py | 4 +-
 .../test_fleet_executor_origin_scheduler.py | 10 +-
 .../test_fleet_executor_with_task_nodes.py | 10 +-
 .../tests/unittests/test_fleet_nocvm_1.py | 6 +-
 .../tests/unittests/test_fleet_rolemaker.py | 16 +-
 .../tests/unittests/test_fleet_rolemaker_2.py | 16 +-
 .../tests/unittests/test_fleet_rolemaker_3.py | 16 +-
 .../unittests/test_fleet_unitaccessor.py | 16 +-
 .../paddle/fluid/tests/unittests/test_flip.py | 5 +-
 .../tests/unittests/test_fuse_bn_act_pass.py | 6 +-
 .../unittests/test_fuse_bn_add_act_pass.py | 12 +-
 .../test_fuse_relu_depthwise_conv_pass.py | 4 +-
 .../unittests/test_fused_emb_seq_pool_op.py | 5 +-
 .../tests/unittests/test_gather_nd_op.py | 38 ++-
 .../fluid/tests/unittests/test_gather_op.py | 6 +-
 .../tests/unittests/test_gather_tree_op.py | 50 +--
 .../unittests/test_generator_dataloader.py | 8 +-
 .../tests/unittests/test_group_norm_op.py | 4 +-
 .../fluid/tests/unittests/test_hsigmoid_op.py | 12 +-
 .../tests/unittests/test_identity_loss_op.py | 2 +-
 .../test_image_classification_layer.py | 20 +-
 .../tests/unittests/test_imperative_deepcf.py | 6 +-
 .../unittests/test_imperative_framework.py | 4 +-
 .../tests/unittests/test_imperative_gan.py | 12 +-
 .../tests/unittests/test_imperative_gnn.py | 21 +-
 ..._imperative_lod_tensor_to_selected_rows.py | 7 +-
 .../tests/unittests/test_imperative_mnist.py | 8 +-
 .../test_imperative_mnist_sorted_gradient.py | 8 +-
 .../test_imperative_ocr_attention_model.py | 16 +-
 .../unittests/test_imperative_optimizer.py | 8 +-
 .../unittests/test_imperative_optimizer_v2.py | 8 +-
 .../unittests/test_imperative_ptb_rnn.py | 16 +-
 ...test_imperative_ptb_rnn_sorted_gradient.py | 16 +-
 .../test_imperative_recurrent_usage.py | 8 +-
 .../test_imperative_reinforcement.py | 12 +-
 .../tests/unittests/test_imperative_resnet.py | 8 +-
 .../test_imperative_resnet_sorted_gradient.py | 8 +-
 .../unittests/test_imperative_se_resnext.py | 8 +-
 ..._imperative_selected_rows_to_lod_tensor.py | 7 +-
 ..._imperative_transformer_sorted_gradient.py | 3 +-
 .../tests/unittests/test_index_select_op.py | 12 +-
 .../unittests/test_inference_model_io.py | 29 +-
 .../tests/unittests/test_inplace_abn_op.py | 6 +-
 ...test_inplace_softmax_with_cross_entropy.py | 8 +-
 .../tests/unittests/test_instance_norm_op.py | 8 +-
 .../tests/unittests/test_ir_inplace_pass.py | 4 +-
 .../unittests/test_ir_memory_optimize_pass.py | 5 +-
 .../fluid/tests/unittests/test_lambv2_op.py | 6 +-
 .../tests/unittests/test_layer_norm_op.py | 11 +-
 .../fluid/tests/unittests/test_layers.py | 303 +++++++++---------
 .../unittests/test_listen_and_serv_op.py | 8 +-
 .../unittests/test_load_vars_shape_check.py | 4 +-
 .../unittests/test_lookup_table_bf16_op.py | 5 +-
 .../unittests/test_lookup_table_v2_bf16_op.py | 4 +-
 .../unittests/test_lookup_table_v2_op.py | 6 +-
 .../tests/unittests/test_math_op_patch.py | 55 ++--
 .../fluid/tests/unittests/test_mean_op.py | 13 +-
 .../test_memory_reuse_exclude_feed_var.py | 4 +-
 .../tests/unittests/test_memory_usage.py | 4 +-
 .../test_mix_precision_all_reduce_fuse.py | 6 +-
 .../fluid/tests/unittests/test_momentum_op.py | 8 +-
 .../fluid/tests/unittests/test_monitor.py | 4 +-
 .../fluid/tests/unittests/test_mse_loss.py | 30 +-
 .../unittests/test_multihead_attention.py | 6 +-
 .../fluid/tests/unittests/test_name_scope.py | 2 +-
 .../paddle/fluid/tests/unittests/test_nce.py | 18 +-
 .../unittests/test_network_with_dtype.py | 4 +-
 .../test_nn_functional_embedding_static.py | 11 +-
 .../unittests/test_nn_functional_hot_op.py | 15 +-
 .../fluid/tests/unittests/test_nn_grad.py | 41 ++-
 .../fluid/tests/unittests/test_nonzero_api.py | 12 +-
 .../unittests/test_normalization_wrapper.py | 3 +-
 .../tests/unittests/test_npair_loss_op.py | 9 +-
 .../tests/unittests/test_one_hot_v2_op.py | 17 +-
 .../fluid/tests/unittests/test_optimizer.py | 16 +-
 .../test_optimizer_in_control_flow.py | 2 +-
 .../test_parallel_executor_drop_scope.py | 2 +-
 .../test_parallel_executor_dry_run.py | 4 +-
 .../test_parallel_executor_fetch_feed.py | 8 +-
 .../unittests/test_parallel_executor_mnist.py | 8 +-
 .../fluid/tests/unittests/test_print_op.py | 5 +-
 .../fluid/tests/unittests/test_profiler.py | 6 +-
 .../fluid/tests/unittests/test_program.py | 3 +-
 .../unittests/test_program_prune_backward.py | 16 +-
 .../tests/unittests/test_program_to_string.py | 4 +-
 .../fluid/tests/unittests/test_prune.py | 21 +-
 .../unittests/test_pull_gpups_sparse_op.py | 4 +-
 .../fluid/tests/unittests/test_py_func_op.py | 8 +-
 .../unittests/test_py_reader_combination.py | 8 +-
 .../unittests/test_py_reader_return_list.py | 4 +-
 .../test_py_reader_sample_generator.py | 6 +-
 .../tests/unittests/test_reader_reset.py | 8 +-
 .../tests/unittests/test_recurrent_op.py | 45 +--
 .../fluid/tests/unittests/test_reduce_op.py | 10 +-
 .../fluid/tests/unittests/test_registry.py | 2 +-
 .../fluid/tests/unittests/test_regularizer.py | 16 +-
 .../tests/unittests/test_regularizer_api.py | 16 +-
 .../fluid/tests/unittests/test_renorm_op.py | 1 -
 .../unittests/test_repeat_interleave_op.py | 20 +-
 .../fluid/tests/unittests/test_rmsprop_op.py | 4 +-
 .../tests/unittests/test_rnn_cell_api.py | 4 +-
 .../fluid/tests/unittests/test_roll_op.py | 9 +-
 .../tests/unittests/test_run_program_op.py | 4 +-
 .../unittests/test_save_model_without_var.py | 5 +-
 .../fluid/tests/unittests/test_scale_op.py | 5 +-
 .../tests/unittests/test_scatter_nd_op.py | 59 ++--
 .../unittests/test_select_input_output_op.py | 27 +-
 .../tests/unittests/test_set_bool_attr.py | 4 +-
 .../fluid/tests/unittests/test_sgd_op_bf16.py | 10 +-
 ...st_sigmoid_cross_entropy_with_logits_op.py | 8 +-
 .../fluid/tests/unittests/test_sign_op.py | 29 +-
 .../fluid/tests/unittests/test_slice_op.py | 19 +-
 .../fluid/tests/unittests/test_split_op.py | 45 ++-
 .../fluid/tests/unittests/test_squeeze_op.py | 5 +-
 .../fluid/tests/unittests/test_stack_op.py | 6 +-
 .../tests/unittests/test_static_save_load.py | 147 +++++----
 .../unittests/test_static_save_load_bf16.py | 16 +-
 ...tatic_shape_inferrence_for_shape_tensor.py | 4 +-
 .../tests/unittests/test_strided_slice_op.py | 15 +-
 .../fluid/tests/unittests/test_sum_op.py | 13 +-
 .../unittests/test_sync_batch_norm_op.py | 9 +-
 .../tests/unittests/test_tdm_child_op.py | 5 +-
 .../tests/unittests/test_tdm_sampler_op.py | 5 +-
 .../fluid/tests/unittests/test_tile_op.py | 11 +-
 .../fluid/tests/unittests/test_trainable.py | 4 +-
 .../tests/unittests/test_transpose_op.py | 13 +-
 .../tests/unittests/test_uniform_random_op.py | 16 +-
 .../tests/unittests/test_unsqueeze_op.py | 5 +-
 .../fluid/tests/unittests/test_variable.py | 2 +-
 .../tests/unittests/test_weight_decay.py | 8 +-
 .../unittests/test_weight_normalization.py | 4 +-
 .../fluid/tests/unittests/test_where_op.py | 60 ++--
 .../tests/unittests/test_while_loop_op.py | 2 +-
 .../fluid/tests/unittests/test_while_op.py | 20 +-
 .../tests/unittests/transformer_model.py | 10 +-
 .../xpu/collective_allgather_op_xpu.py | 5 +-
 .../xpu/collective_allreduce_op_xpu.py | 5 +-
 .../xpu/collective_identity_op_xpu.py | 5 +-
 .../unittests/xpu/test_adadelta_op_xpu.py | 8 +-
 .../tests/unittests/xpu/test_conv3d_op_xpu.py | 13 +-
 .../unittests/xpu/test_dropout_op_xpu.py | 4 +-
 .../unittests/xpu/test_expand_as_v2_op_xpu.py | 7 +-
 .../unittests/xpu/test_expand_v2_op_xpu.py | 10 +-
 .../unittests/xpu/test_index_select_op_xpu.py | 12 +-
 .../xpu/test_lookup_table_v2_op_xpu.py | 2 +-
 .../tests/unittests/xpu/test_mean_op_xpu.py | 12 +-
 .../unittests/xpu/test_one_hot_v2_op_xpu.py | 5 +-
 .../xpu/test_sequence_conv_op_xpu.py | 2 +-
 .../unittests/xpu/test_where_index_xpu.py | 2 +-
 .../tests/unittests/xpu/test_where_op_xpu.py | 4 +-
 .../tests/unittests/xpu/test_while_op_xpu.py | 12 +-
 tools/codestyle/test_docstring_checker.py | 2 +-
 tools/infrt/fake_models/multi_fc.py | 4 +-
 398 files changed, 2401 insertions(+), 2497 deletions(-)

diff --git a/python/paddle/fluid/contrib/layers/nn.py b/python/paddle/fluid/contrib/layers/nn.py
index 7ab225c8c9..b836dfa451 100644
--- a/python/paddle/fluid/contrib/layers/nn.py
+++ b/python/paddle/fluid/contrib/layers/nn.py
@@ -98,10 +98,12 @@ def fused_embedding_seq_pool(
         ..
code-block:: python import numpy as np import paddle.fluid as fluid + import paddle + paddle.enable_static() dict_size = 20 - data_t = fluid.layers.data( - name='word', shape=[1], dtype='int64', lod_level=1) + data_t = paddle.static.data( + name='word', shape=[-1, 1], dtype='int64', lod_level=1) padding_idx = np.random.randint(1, 10) out = fluid.contrib.fused_embedding_seq_pool( input=data_t, @@ -305,11 +307,13 @@ def multiclass_nms2( import paddle.fluid as fluid - boxes = fluid.layers.data(name='bboxes', shape=[81, 4], + import paddle + paddle.enable_static() + boxes = paddle.static.data(name='bboxes', shape=[-1, 81, 4], dtype='float32', lod_level=1) - scores = fluid.layers.data(name='scores', shape=[81], + scores = paddle.static.data(name='scores', shape=[-1, 81], dtype='float32', lod_level=1) - out, index = fluid.layers.multiclass_nms2(bboxes=boxes, + out, index = fluid.contrib.layers.multiclass_nms2(bboxes=boxes, scores=scores, background_label=0, score_threshold=0.5, @@ -501,7 +505,9 @@ def shuffle_batch(x, seed=None): .. code-block:: python import paddle.fluid as fluid - x = fluid.layers.data(name="x", shape=[-1, 4]) + import paddle + paddle.enable_static() + x = paddle.static.data(name="x", shape=[-1, 4]) out = fluid.contrib.layers.shuffle_batch(x) """ helper = LayerHelper('shuffle_batch', **locals()) @@ -1313,7 +1319,7 @@ def _pull_box_extended_sparse(input, size, extend_size=64, dtype='float32'): Examples: .. code-block:: python import paddle.fluid as fluid - data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1) emb, emb_ex = fluid.contrib.layers._pull_box_extended_sparse(input=data, size=8, extend_size=128) """ helper = LayerHelper('pull_box_extended_sparse', **locals()) @@ -1438,15 +1444,14 @@ def correlation( .. 
code-block:: python import paddle.fluid as fluid - - x1 = fluid.layers.data(name='x1', - shape=x_shape, - dtype=x_type, - append_batch_size=False) - x2 = fluid.layers.data(name='x2', - shape=x_shape, - dtype=x_type, - append_batch_size=False) + import paddle + paddle.enable_static() + x1 = paddle.static.data(name='x1', + shape=[2,3,4,5], + dtype="float32") + x2 = paddle.static.data(name='x2', + shape=[2,3,4,5], + dtype="float32") out = fluid.contrib.correlation( @@ -1555,8 +1560,8 @@ def fused_bn_add_act( # required: gpu def build_program(main_program, startup_program): with fluid.program_guard(main_program, startup_program): - x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') - y = fluid.layers.data(name="y", shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[-1, 1, 28, 28], dtype='float32') + y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') conv1_1 = paddle.static.nn.conv2d( input=x, filter_size=3, diff --git a/python/paddle/fluid/contrib/tests/test_correlation.py b/python/paddle/fluid/contrib/tests/test_correlation.py index 4e9ef9b0fe..0f614f4324 100644 --- a/python/paddle/fluid/contrib/tests/test_correlation.py +++ b/python/paddle/fluid/contrib/tests/test_correlation.py @@ -85,20 +85,20 @@ class TestCorrelationOp(unittest.TestCase): np.set_printoptions(threshold=np.inf) x_shape = (2, 10, 3, 3) x_type = 'float32' - x1 = fluid.layers.data( + x1 = paddle.static.data( name='x1', shape=x_shape, dtype=x_type, - append_batch_size=False, - stop_gradient=False, ) - x2 = fluid.layers.data( + x1.desc.set_need_check_feed(False) + x1.stop_gradient = False + x2 = paddle.static.data( name='x2', shape=x_shape, dtype=x_type, - append_batch_size=False, - stop_gradient=False, ) + x2.desc.set_need_check_feed(False) + x2.stop_gradient = False x1_np = np.random.randn(2, 3, 4, 5).astype(x_type) x2_np = np.random.randn(2, 3, 4, 5).astype(x_type) diff --git a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py index f99d640799..5ab54bc716 100644 --- a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py +++ b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py @@ -110,10 +110,10 @@ def train(net_type, use_cuda, save_dirname, is_local): train_program.random_seed = 123 startup_prog.random_seed = 456 with fluid.program_guard(train_program, startup_prog): - images = fluid.layers.data( - name='pixel', shape=data_shape, dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1] + data_shape, dtype='float32' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') if net_type == "vgg": print("train vgg net") @@ -444,11 +444,11 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase): start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): with paddle.fluid.unique_name.guard(): - image = fluid.layers.data( - name='image', shape=[3, 224, 224], dtype='float32' + image = paddle.static.data( + name='image', shape=[-1, 3, 224, 224], dtype='float32' ) - label = fluid.layers.data( - name='label', shape=[1], dtype='int64' + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) py_reader = fluid.io.DataLoader.from_generator( feed_list=[image, label], diff --git a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py index 
7254dd9df3..a976385368 100644 --- a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py +++ b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py @@ -96,14 +96,22 @@ class TestModelCastBF16(unittest.TestCase): nn_bf16 = amp.bf16.convert_float_to_uint16(nn) with self.static_graph(): - t_bf16 = layers.data( - name='t_bf16', shape=[size, size], dtype=np.int32 + t_bf16 = paddle.static.data( + name='t_bf16', shape=[-1, size, size], dtype='int32' ) - tt_bf16 = layers.data( - name='tt_bf16', shape=[size, size], dtype=np.int32 + t_bf16.desc.set_need_check_feed(False) + tt_bf16 = paddle.static.data( + name='tt_bf16', shape=[-1, size, size], dtype='int32' ) - t = layers.data(name='t', shape=[size, size], dtype='float32') - tt = layers.data(name='tt', shape=[size, size], dtype='float32') + tt_bf16.desc.set_need_check_feed(False) + t = paddle.static.data( + name='t', shape=[-1, size, size], dtype='float32' + ) + t.desc.set_need_check_feed(False) + tt = paddle.static.data( + name='tt', shape=[-1, size, size], dtype='float32' + ) + tt.desc.set_need_check_feed(False) ret = paddle.add(t, tt) ret = paddle.multiply(ret, t) @@ -143,8 +151,14 @@ class TestModelCastBF16(unittest.TestCase): ) with self.static_graph(): - t = layers.data(name='t', shape=[size, size], dtype='float32') - tt = layers.data(name='tt', shape=[size, size], dtype='float32') + t = paddle.static.data( + name='t', shape=[-1, size, size], dtype='float32' + ) + t.desc.set_need_check_feed(False) + tt = paddle.static.data( + name='tt', shape=[-1, size, size], dtype='float32' + ) + tt.desc.set_need_check_feed(False) with amp.bf16.bf16_guard(): ret = paddle.add(t, tt) diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py index 1d4873817a..75754fb8bb 100644 --- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py +++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py @@ -102,10 +102,10 @@ def train(use_pure_fp16=True, use_nesterov=False, optimizer=""): train_program.random_seed = 123 startup_prog.random_seed = 456 with fluid.program_guard(train_program, startup_prog): - images = fluid.layers.data( - name='pixel', shape=data_shape, dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1] + data_shape, dtype='float32' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') net = resnet_cifar10(images) logits = paddle.static.nn.fc(x=net, size=classdim, activation="softmax") cost = paddle.nn.functional.softmax_with_cross_entropy( @@ -275,11 +275,11 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase): start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): with paddle.fluid.unique_name.guard(): - image = fluid.layers.data( - name='image', shape=[3, 224, 224], dtype='float32' + image = paddle.static.data( + name='image', shape=[-1, 3, 224, 224], dtype='float32' ) - label = fluid.layers.data( - name='label', shape=[1], dtype='int64' + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) py_reader = fluid.io.DataLoader.from_generator( feed_list=[image, label], diff --git a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py index 7af54b7d15..870aceb557 100644 --- a/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py +++ 
b/python/paddle/fluid/contrib/tests/test_weight_decay_extend.py @@ -134,10 +134,12 @@ class TestWeightDecay(unittest.TestCase): startup_prog = fluid.framework.Program() with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_dict_len) AdamW = fluid.contrib.extend_with_decoupled_weight_decay( fluid.optimizer.Adam @@ -158,10 +160,12 @@ class TestWeightDecay(unittest.TestCase): startup_prog = fluid.framework.Program() with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_dict_len) diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index e3376d8446..2822a87a02 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -1862,7 +1862,6 @@ class Executor: vardesc = global_block.desc.find_var(varname.encode()) varobj = global_block.vars[varname] - # Can not check var build by fluid.layers.data(), bucause fluid.layers.data() had not set need_check_feed if ( vardesc.persistable() == False and vardesc.type() == core.VarDesc.VarType.LOD_TENSOR diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index da2fa96c75..8b019114a3 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -6957,9 +6957,10 @@ class Parameter(Variable, metaclass=ParameterMetaClass): .. 
code-block:: python import paddle.fluid as fluid + import paddle prog = fluid.default_main_program() - rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32') + rlt = paddle.static.data("fake_data", shape=[-1,1,1], dtype='float32') debug_str = prog.to_string(throw_on_error=True, with_details=False) print(debug_str) """ diff --git a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py index e2b7993648..23f5a44fe1 100644 --- a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py +++ b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py @@ -80,26 +80,23 @@ def model(): train_file_path, ) = ctr_dataset_reader.prepare_data() """ network definition """ - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py index 4ec3c1d16e..e0ae707be9 100644 --- a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py +++ b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py @@ -1371,8 +1371,8 @@ class FleetUtil: local_total_ins.name) # below is part of example model - label = fluid.layers.data(name="click", shape=[-1, 1],\ - dtype="int64", lod_level=0, append_batch_size=False) + label = paddle.static.data(name="click", shape=[-1, 1],\ + dtype="int64", lod_level=0) emb = my_slot_net(slots, label) # emb can be fc layer of size 1 similarity_norm = fluid.layers.sigmoid(paddle.clip(\ emb, min=-15.0, max=15.0), name="similarity_norm")\ @@ -1571,8 +1571,8 @@ class FleetUtil: local_total_ins.name) # below is part of model - label = fluid.layers.data(name="click", shape=[-1, 1],\ - dtype="int64", lod_level=0, append_batch_size=False) + label = paddle.static.data(name="click", shape=[-1, 1],\ + dtype="int64", lod_level=0) emb = my_slot_net(slots, label) # emb can be fc layer of size 1 similarity_norm = fluid.layers.sigmoid(paddle.clip(\ emb, min=-15.0, max=15.0), name="similarity_norm")\ diff --git a/python/paddle/fluid/incubate/fleet/utils/utils.py b/python/paddle/fluid/incubate/fleet/utils/utils.py index ef022c96ec..4aff834b12 100644 --- a/python/paddle/fluid/incubate/fleet/utils/utils.py +++ b/python/paddle/fluid/incubate/fleet/utils/utils.py @@ -17,6 +17,7 @@ import sys import logging import subprocess import numpy as np +import paddle from collections import OrderedDict import paddle.fluid as fluid from paddle.fluid import core @@ -172,8 +173,9 @@ def save_var(np_array, var_name, shape_list, dtype, save_path): program = fluid.Program() place = fluid.CPUPlace() exe = fluid.Executor(place) + shape = list(shape_list) with fluid.program_guard(program): - d0_data = fluid.layers.data(var_name, shape=shape_list, dtype=dtype) + d0_data = paddle.static.data(var_name, shape=shape, dtype=dtype) append_save_op(program.global_block(), d0_data, save_path) exe.run(feed={var_name: np_array}, fetch_list=[]) @@ -183,7 +185,7 @@ def load_var(var_name, shape_list, dtype, save_path): place = fluid.CPUPlace() exe = fluid.Executor(place) with fluid.program_guard(program): - d0_data = 
fluid.layers.data(var_name, shape=shape_list, dtype=dtype) + d0_data = paddle.static.data(var_name, shape=shape_list, dtype=dtype) append_load_op(program.global_block(), d0_data, save_path) outs = exe.run(feed={}, fetch_list=[d0_data]) return outs diff --git a/python/paddle/fluid/install_check.py b/python/paddle/fluid/install_check.py index bf1ad9b107..241dd71e20 100644 --- a/python/paddle/fluid/install_check.py +++ b/python/paddle/fluid/install_check.py @@ -103,7 +103,7 @@ def run_check(): with unique_name.guard(): build_strategy = compiler.BuildStrategy() build_strategy.enable_inplace = True - inp = layers.data(name="inp", shape=[2, 2]) + inp = paddle.static.data(name="inp", shape=[-1, 2, 2]) simple_layer = SimpleLayer(input_size=2) out = simple_layer(inp) exe = executor.Executor( @@ -138,9 +138,7 @@ def run_check(): with executor.scope_guard(scope): with program_guard(train_prog, startup_prog): with unique_name.guard(): - inp0 = layers.data( - name="inp", shape=[2, 2], append_batch_size=False - ) + inp0 = paddle.static.data(name="inp", shape=[2, 2]) simple_layer0 = SimpleLayer(input_size=2) out0 = simple_layer0(inp0) param_grads = backward.append_backward( diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 490e9412cb..9faeacee88 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -355,7 +355,7 @@ def save_vars( main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + data = paddle.static.data(name="img", shape=[64, 784]) w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b') hidden_w = paddle.matmul(x=data, y=w) @@ -830,7 +830,7 @@ def load_vars( main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + data = paddle.static.data(name="img", shape=[64, 784]) w = paddle.create_parameter(shape=[784, 200], dtype='float32', name='fc_w') b = paddle.create_parameter(shape=[200], dtype='float32', name='fc_b') hidden_w = paddle.matmul(x=data, y=w) @@ -1598,7 +1598,7 @@ def load_inference_model( main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + data = paddle.static.data(name="img", shape=[-1, 64, 784]) w = paddle.create_parameter(shape=[784, 200], dtype='float32') b = paddle.create_parameter(shape=[200], dtype='float32') hidden_w = paddle.matmul(x=data, y=w) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index e01536b2e3..5c877c1e8f 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -466,7 +466,7 @@ class StaticRNN: is_sparse=False) # transform batch size to dim 1 x_emb = paddle.transpose(x_emb, perm=[1, 0, 2]) - boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1) + boot_memory = paddle.static.data(name='boot', shape=[-1, hidden_size], dtype='float32', lod_level=1) rnn = fluid.layers.StaticRNN() with rnn.step(): # mark created x_emb as input, each step process a word diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index e33cf23e6c..994fc98038 100644 --- 
a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -41,108 +41,7 @@ from ..framework import ( _set_expected_place, ) -__all__ = [ - 'data', -] - - -@static_only -def data( - name, - shape, - append_batch_size=True, - dtype='float32', - lod_level=0, - type=core.VarDesc.VarType.LOD_TENSOR, - stop_gradient=True, -): - """ - **Data Layer** - - This operator creates the global variable. The global variables can be - accessed by all the following operators in the graph. - - Note: - :code:`paddle.fluid.layers.data` is deprecated as it will be removed in - a later version. Please use :code:`paddle.fluid.data` . - - This :code:`paddle.fluid.layers.data` set shape and dtype at compile - time but does NOT check the shape or the dtype of fed data, the - :code:`paddle.fluid.data` checks the shape and the dtype of data fed - by Executor or ParallelExecutor during run time. - - To feed variable size inputs, users can feed variable size inputs - directly to this :code:`paddle.fluid.layers.data` and PaddlePaddle will - fit the size accordingly. Or set -1 on the variable dimension when using - :code:`paddle.fluid.data` . - - The default :code:`stop_gradient` attribute of the Variable created by - this API is true, which means the gradient won't be passed backward - through the data Varaible. Set :code:`var.stop_gradient = False` If - user would like to pass backward gradient. - - Args: - name(str): The name/alias of the variable, see :ref:`api_guide_Name` - for more details. - shape(list|tuple): Tuple declaring the shape. If :code:`append_batch_size` is - True and there is no -1 inside :code:`shape`, it should be - considered as the shape of the each sample. Otherwise, it should - be considered as the shape of the batched data. - append_batch_size(bool): - 1. If true, it prepends -1 to the shape. - For example if shape=[1], the resulting shape is [-1, 1]. This will - be useful to set different batch size at run time. - 2. If shape contains -1, such as shape=[1, -1]. - append_batch_size will be enforced to be be False (ineffective) - because PaddlePaddle cannot set more than 1 unknown number on the - shape. - dtype(np.dtype|VarType|str): The type of the data. Supported dtype: bool, - float16, float32, float64, int8, int16, int32, int64, uint8. - type(VarType): The output type. Supported dtype: VarType.LOD_TENSOR, - VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR. - lod_level(int): The LoD Level. 0 means the input data is not a sequence. - Default: 0. - stop_gradient(bool): A boolean that mentions whether gradient should flow. - Default: True. - - Returns: - The global variable that gives access to the data. - - Return Type: - Variable - - Examples: - .. 
code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name='x', shape=[784], dtype='float32') - """ - helper = LayerHelper('data', **locals()) - - check_type(name, 'name', (bytes, str), 'data') - check_type(shape, 'shape', (list, tuple), 'data') - - shape = list(shape) - for i in range(len(shape)): - if shape[i] is None: - shape[i] = -1 - append_batch_size = False - elif shape[i] < 0: - append_batch_size = False - - if append_batch_size: - shape = [-1] + shape # append batch size as -1 - - data_var = helper.create_global_variable( - name=name, - shape=shape, - dtype=dtype, - type=type, - stop_gradient=stop_gradient, - lod_level=lod_level, - is_data=True, - ) - return data_var +__all__ = [] class BlockGuardServ(BlockGuard): @@ -189,11 +88,10 @@ class ListenAndServ: serv = layers.ListenAndServ( "127.0.0.1:6170", ["X"], optimizer_mode=False) with serv.do(): - x = layers.data( + x = paddle.static.data( shape=[32, 32], dtype='float32', - name="X", - append_batch_size=False) + name="X") fluid.initializer.Constant(value=1.0)(x, main.global_block()) paddle.scale(x=x, scale=10.0, out=out_var) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index fa0e4007ea..fa0f49d01b 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -329,7 +329,7 @@ def _pull_sparse( .. code-block:: python import paddle.fluid as fluid - data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1) emb = fluid.layers.nn._pull_sparse( input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor") """ @@ -403,7 +403,7 @@ def _pull_sparse_v2( .. code-block:: python import paddle.fluid as fluid - data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1) emb = fluid.layers.nn._pull_sparse_v2( input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor") """ @@ -464,9 +464,9 @@ def _pull_gpups_sparse( import paddle.fluid as fluid slots = [] - data_1 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data_1 = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1) slots.append(data_1) - data_2 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data_2 = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1) slots.append(data_2) embs = fluid.layers.pull_gpups_sparse(input=slots, size=[11, 35]) """ @@ -526,7 +526,7 @@ def _pull_box_sparse( .. code-block:: python import paddle.fluid as fluid - data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + data = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1) emb = fluid.layers.pull_box_sparse(input=data, size=[11]) """ helper = LayerHelper('pull_box_sparse', **locals()) @@ -711,7 +711,7 @@ def unsqueeze(input, axes, name=None): .. 
code-block:: python import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[5, 10]) + x = paddle.static.data(name='x', shape=[-1, 5, 10], dtype="float32") y = fluid.layers.unsqueeze(input=x, axes=[1]) """ diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 1bab4f3431..52501992ef 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -1431,8 +1431,8 @@ class SGDOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -1623,8 +1623,8 @@ class MomentumOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -1772,8 +1772,8 @@ class LarsMomentumOptimizer(Optimizer): paddle.enable_static() np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) - inp = fluid.layers.data( - name="inp", shape=[2, 2], append_batch_size=False) + inp = paddle.static.data( + name="inp", shape=[2, 2], dtype='float32') out = paddle.static.nn.fc(inp, size=3) out = paddle.sum(out) optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9) @@ -2764,7 +2764,7 @@ class DpsgdOptimizer(Optimizer): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = paddle.static.data(name='X', shape=[-1,1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) optimizer = fluid.optimizer.Dpsgd(learning_rate=0.01, clip=10.0, batch_size=16.0, sigma=1.0) @@ -3217,8 +3217,8 @@ class RMSPropOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -3415,8 +3415,8 @@ class FtrlOptimizer(Optimizer): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = 
paddle.mean(cost) @@ -4354,11 +4354,12 @@ class PipelineOptimizer: import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers + import numpy as np paddle.enable_static() with fluid.device_guard("gpu:0"): - x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0) - y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0) + x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64', lod_level=0) + y = paddle.static.data(name='y', shape=[-1, 1], dtype='int64', lod_level=0) data_loader = fluid.io.DataLoader.from_generator( feed_list=[x, y], capacity=64, @@ -6332,8 +6333,8 @@ class RecomputeOptimizer(Optimizer): ) sum_cost = paddle.mean(cost) return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) sgd = fluid.optimizer.Adam(learning_rate=0.01) @@ -6410,8 +6411,8 @@ class RecomputeOptimizer(Optimizer): sum_cost = paddle.mean(cost) return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) print("Finished FF") @@ -6458,8 +6459,8 @@ class RecomputeOptimizer(Optimizer): return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) print("Finished FF") @@ -6952,8 +6953,8 @@ class RecomputeOptimizer(Optimizer): return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) print("Finished FF") @@ -7033,8 +7034,8 @@ class RecomputeOptimizer(Optimizer): sum_cost = paddle.mean(cost) return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) print("Finished FF") @@ -7120,8 +7121,8 @@ class LookaheadOptimizer: paddle.enable_static() - x = fluid.layers.data(name='x', shape=[2], dtype='float32') - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + x = paddle.static.data(name='x', shape=[-1,2], dtype='float32') + label = paddle.static.data(name="label", shape=[-1,1], dtype="int64") y = paddle.static.nn.fc(x=[x], size=2, activation="softmax") loss = paddle.nn.functional.cross_entropy( input=y, label=label, @@ -7311,8 +7312,8 @@ class GradientMergeOptimizer: sum_cost = paddle.mean(cost) return sum_cost, fc_1, prediction - input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') - input_y = 
fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1,32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1,1], dtype='int64') cost, fc_1, pred = mlp(input_x, input_y) sgd = fluid.optimizer.Adam(learning_rate=0.01) sgd = fluid.optimizer.GradientMergeOptimizer(sgd, k_steps=4, avg=True) diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index e6cabbdde9..8e36dd2800 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -1643,7 +1643,7 @@ class PyReader(DataLoaderBase): Args: feed_list (list(Variable)|tuple(Variable)): feed variable list. - The variables should be created by :code:`fluid.layers.data()`. + The variables should be created by :code:`paddle.static.data()`. capacity (int): capacity of the queue maintained in PyReader. The unit is batch number. Set larger capacity if your reader is fast. diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 36e648e3e3..84bfa351c9 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -74,8 +74,8 @@ class L2DecayRegularizer(WeightDecayRegularizer): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + data = paddle.static.data(name='image', shape=[-1, 3, 28, 28], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') hidden = paddle.static.nn.fc(x=data, size=128, activation='relu') prediction = paddle.static.nn.fc(x=hidden, size=10, activation='softmax') loss = paddle.nn.functional.cross_entropy( @@ -193,8 +193,8 @@ class L1DecayRegularizer(WeightDecayRegularizer): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + data = paddle.static.data(name='image', shape=[-1, 3, 28, 28], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') hidden = paddle.static.nn.fc(x=data, size=128, activation='relu') prediction = paddle.static.nn.fc(x=hidden, size=10, activation='softmax') loss = paddle.nn.functional.cross_entropy( diff --git a/python/paddle/fluid/tests/book/notest_understand_sentiment.py b/python/paddle/fluid/tests/book/notest_understand_sentiment.py index c406fae5f8..0eb09b210b 100644 --- a/python/paddle/fluid/tests/book/notest_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/notest_understand_sentiment.py @@ -68,10 +68,10 @@ def train( dict_dim = len(word_dict) class_dim = 2 - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") if not parallel: cost, acc_out, prediction = net_method( diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index b6efc9775e..c7c930fd53 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -49,8 +49,10 @@ def convert_float_to_uint16(in_list): def train(use_cuda, 
save_dirname, is_local, use_bf16, pure_bf16): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + x.desc.set_need_check_feed(False) + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) if use_bf16: if not pure_bf16: diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index f1fa47afb8..6ee3395158 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -104,8 +104,10 @@ def train(net_type, use_cuda, save_dirname, is_local): classdim = 10 data_shape = [3, 32, 32] - images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1] + data_shape, dtype='float32' + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') if net_type == "vgg": print("train vgg net") diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index 85d946e218..5d6939af89 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -77,8 +77,8 @@ def train( ): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='img', shape=[-1, 1, 28, 28], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') if nn_type == 'mlp': net_conf = mlp diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index aad9e7ce01..1564b0594f 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -40,7 +40,7 @@ def get_usr_combined_features(): USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 - uid = layers.data(name='user_id', shape=[1], dtype='int64') + uid = paddle.static.data(name='user_id', shape=[-1, 1], dtype='int64') usr_emb = layers.embedding( input=uid, @@ -54,7 +54,9 @@ def get_usr_combined_features(): USR_GENDER_DICT_SIZE = 2 - usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64') + usr_gender_id = paddle.static.data( + name='gender_id', shape=[-1, 1], dtype='int64' + ) usr_gender_emb = layers.embedding( input=usr_gender_id, @@ -66,7 +68,7 @@ def get_usr_combined_features(): usr_gender_fc = paddle.static.nn.fc(x=usr_gender_emb, size=16) USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) - usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64") + usr_age_id = paddle.static.data(name='age_id', shape=[-1, 1], dtype="int64") usr_age_emb = layers.embedding( input=usr_age_id, @@ -78,7 +80,7 @@ def get_usr_combined_features(): usr_age_fc = paddle.static.nn.fc(x=usr_age_emb, size=16) USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 - usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64") + usr_job_id = paddle.static.data(name='job_id', shape=[-1, 1], dtype="int64") usr_job_emb = layers.embedding( input=usr_job_id, @@ -104,7 +106,7 @@ def 
get_mov_combined_features(): MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 - mov_id = layers.data(name='movie_id', shape=[1], dtype='int64') + mov_id = paddle.static.data(name='movie_id', shape=[-1, 1], dtype='int64') mov_emb = layers.embedding( input=mov_id, @@ -118,8 +120,8 @@ def get_mov_combined_features(): CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) - category_id = layers.data( - name='category_id', shape=[1], dtype='int64', lod_level=1 + category_id = paddle.static.data( + name='category_id', shape=[-1, 1], dtype='int64', lod_level=1 ) mov_categories_emb = layers.embedding( @@ -132,8 +134,8 @@ def get_mov_combined_features(): MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) - mov_title_id = layers.data( - name='movie_title', shape=[1], dtype='int64', lod_level=1 + mov_title_id = paddle.static.data( + name='movie_title', shape=[-1, 1], dtype='int64', lod_level=1 ) mov_title_emb = layers.embedding( @@ -170,7 +172,7 @@ def model(): ) scale_infer = paddle.scale(x=inference, scale=5.0) - label = layers.data(name='score', shape=[1], dtype='float32') + label = paddle.static.data(name='score', shape=[-1, 1], dtype='float32') square_cost = paddle.nn.functional.square_error_cost( input=scale_infer, label=label ) diff --git a/python/paddle/fluid/tests/book/test_word2vec_book.py b/python/paddle/fluid/tests/book/test_word2vec_book.py index e932394e8c..f117c523a7 100644 --- a/python/paddle/fluid/tests/book/test_word2vec_book.py +++ b/python/paddle/fluid/tests/book/test_word2vec_book.py @@ -108,11 +108,13 @@ def train( word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) - first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') - second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') - third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') - forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') - next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') + first_word = paddle.static.data(name='firstw', shape=[-1, 1], dtype='int64') + second_word = paddle.static.data( + name='secondw', shape=[-1, 1], dtype='int64' + ) + third_word = paddle.static.data(name='thirdw', shape=[-1, 1], dtype='int64') + forth_word = paddle.static.data(name='forthw', shape=[-1, 1], dtype='int64') + next_word = paddle.static.data(name='nextw', shape=[-1, 1], dtype='int64') if not is_parallel: avg_cost, predict_word = __network__( diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 4cbdba6419..517d874f45 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -22,8 +22,8 @@ paddle.enable_static() class TestDataFeeder(unittest.TestCase): def test_lod_level_0_converter(self): - img = fluid.layers.data(name='image', shape=[1, 28, 28]) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 1, 28, 28]) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) @@ -41,10 +41,10 @@ class TestDataFeeder(unittest.TestCase): def test_lod_level_1_converter(self): # lod_level = 1 # each sentence has a different number of words - sentences = fluid.layers.data( - name='sentences', shape=[1], dtype='int64', lod_level=1 + sentences = paddle.static.data( + 
name='sentences', shape=[-1, 1], dtype='int64', lod_level=1 ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace()) # lod = [[0, 3, 5, 9]] @@ -64,10 +64,10 @@ class TestDataFeeder(unittest.TestCase): def test_lod_level_2_converter(self): # lod_level = 2 # paragraphs -> sentences -> words - paragraphs = fluid.layers.data( - name='paragraphs', shape=[1], dtype='int64', lod_level=2 + paragraphs = paddle.static.data( + name='paragraphs', shape=[-1, 1], dtype='int64', lod_level=2 ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace()) # lod = [[0, 2, 3], [0, 3, 5, 9]] diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index aa97a32054..39b17cd063 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -20,7 +20,6 @@ from unittests.test_imperative_base import new_program_scope import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core from paddle.fluid.dygraph import base from paddle.fluid.framework import Program, program_guard @@ -154,10 +153,12 @@ class TestMulticlassNMS2(unittest.TestCase): def test_multiclass_nms2(self): program = Program() with program_guard(program): - bboxes = layers.data( + bboxes = paddle.static.data( name='bboxes', shape=[-1, 10, 4], dtype='float32' ) - scores = layers.data(name='scores', shape=[-1, 10], dtype='float32') + scores = paddle.static.data( + name='scores', shape=[-1, 10], dtype='float32' + ) output = fluid.contrib.multiclass_nms2( bboxes, scores, 0.3, 400, 200, 0.7 ) diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index 9dc0771a39..0c1d861493 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -23,13 +23,13 @@ paddle.enable_static() prog = fluid.framework.Program() with fluid.program_guard(main_program=prog): - image = fluid.layers.data(name='x', shape=[784], dtype='float32') + image = paddle.static.data(name='x', shape=[-1, 784], dtype='float32') hidden1 = paddle.static.nn.fc(x=image, size=128, activation='relu') hidden2 = paddle.static.nn.fc(x=hidden1, size=64, activation='relu') predict = paddle.static.nn.fc(x=hidden2, size=10, activation='softmax') - label = fluid.layers.data(name='y', shape=[1], dtype='int64') + label = paddle.static.data(name='y', shape=[-1, 1], dtype='int64') cost = paddle.nn.functional.cross_entropy( input=predict, label=label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py index 551a9eed8b..954e06e2a8 100644 --- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py +++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py @@ -46,8 +46,8 @@ def generator(): def net(): - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[1], dtype='int64') + x = paddle.static.data(name="x", shape=[-1, 3], dtype='float32') + y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') # test int64 value zero = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) diff --git 
a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py index 72e28c21f2..6eb1f56286 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allgather_api.py @@ -20,7 +20,6 @@ import test_collective_api_base as test_base import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -33,7 +32,9 @@ class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase): dtype = "float32" if dtype is None else dtype with fluid.program_guard(main_prog, startup_program): tensor_list = [] - tindata = layers.data(name="tindata", shape=[10, 1000], dtype=dtype) + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype=dtype + ) paddle.distributed.all_gather(tensor_list, tindata) return tensor_list diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py index e995cd8d37..291ad384f3 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,8 +26,8 @@ class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) paddle.distributed.all_reduce(tindata) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py index ba5580eb28..b66fdbedd2 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_new_group_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,8 +26,8 @@ class TestCollectiveAllreduceNewGroupAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) gp = paddle.distributed.new_group([0, 1]) paddle.distributed.all_reduce(tindata, group=gp, sync_op=True) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py index 3f3d0e8c0a..250156905e 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid
import core paddle.enable_static() @@ -29,9 +28,10 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofallreduce", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py index 0fce303f78..b0d4eeec95 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_allreduce_op_wait.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -29,9 +28,10 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofallreduce", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py index 0bfc3951f9..1b5d0082ea 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_alltoall_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,9 +26,10 @@ class TestCollectiveAllToAllAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) tindata = paddle.split(tindata, 2, axis=0) tout_data = [] paddle.distributed.alltoall(tindata, tout_data) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py index 046501ca20..5011a5b79e 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,9 +26,10 @@ class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + 
name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) paddle.distributed.broadcast(tindata, src=1) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py index 8d02dd771b..7c49f1f55e 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_broadcast_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveBroadcast(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofbroadcast", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py b/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py index dd85905caf..df784d2815 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_concat_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveConcat(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofconcat", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py b/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py index c4b993d8a3..4926076b6b 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_identity_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py index fb16088b4b..6f033c7d1f 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py +++ 
b/python/paddle/fluid/tests/unittests/collective/collective_reduce_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,9 +26,10 @@ class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) paddle.distributed.reduce(tindata, dst=0) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py index 8e75d5ee91..9c34082113 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py index 7d122764f5..a4ae2a3623 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_reduce_op_calc_stream.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,11 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) + toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py index f4671af0df..de7ec2cfcd 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_scatter_api.py @@ -27,11 +27,10 @@ class TestCollectiveScatterAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( + tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32', - append_batch_size=False, ) toutdata = layers.fill_constant( shape=[5, 1000], 
dtype='float32', value=1.0 diff --git a/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py b/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py index 798485b638..56aa7210d2 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_scatter_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveScatter(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofreduce", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py index bcdb8342ca..e0a2487743 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_api.py @@ -16,7 +16,6 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -27,11 +26,10 @@ class TestCollectiveSendRecvAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( + tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32', - append_batch_size=False, ) if rank == 0: paddle.distributed.send(tindata, dst=1) diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py index eab4980733..8b2abc74f0 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -28,12 +27,12 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( + tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float64', - append_batch_size=False, ) + tindata.desc.set_need_check_feed(False) if self.rank == 0: main_prog.global_block().append_op( type="send_v2", diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py index 8c4ebeaffd..bea4a71089 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_array.py @@ -17,7 +17,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers 
paddle.enable_static() @@ -29,12 +28,12 @@ class TestCollectiveSendRecv(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( + tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float64', - append_batch_size=False, ) + tindata.desc.set_need_check_feed(False) if self.rank == 0: data1 = fluid.layers.assign( np.array([[0, 1, 2]], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py index 80a0176782..2f1aaac0b1 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_sendrecv_op_dynamic_shape.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -28,12 +27,12 @@ class TestCollectiveSendRecvDynamicShape(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( + tindata = paddle.static.data( name="tindata", - shape=[10, 1000], + shape=[-1, 10, 1000], dtype='float64', - append_batch_size=False, ) + tindata.desc.set_need_check_feed(False) if self.rank == 0: main_prog.global_block().append_op( type="send_v2", diff --git a/python/paddle/fluid/tests/unittests/collective/collective_split_op.py b/python/paddle/fluid/tests/unittests/collective/collective_split_op.py index 127ab924da..3348d4c9c9 100644 --- a/python/paddle/fluid/tests/unittests/collective/collective_split_op.py +++ b/python/paddle/fluid/tests/unittests/collective/collective_split_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofsplit", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py index 01d6d970c6..35525d03ca 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dist_mnist_gradient_merge.py @@ -29,8 +29,10 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py index c33e5bc27a..2eb0951756 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist.py @@ -85,10 +85,12 @@ class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data with fluid.device_guard("gpu:0"): - images = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype=DTYPE + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py index 905df8cd6b..e094d932d3 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_multi_device.py @@ -85,10 +85,12 @@ class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data with fluid.device_guard("gpu:0"): - images = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype=DTYPE + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py index dbe50789d6..7e442f1914 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/pipeline_mnist_one_device.py @@ -77,10 +77,12 @@ class TestDistMnist2x2(TestDistRunnerBase): if dist_strategy: fleet.init(is_collective=True) with fluid.device_guard("gpu:0"): - images = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype=DTYPE + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') if dist_strategy: data_loader = fluid.io.DataLoader.from_generator( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py index 21f7b624f5..6d8decbc3e 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_half_async.py @@ -29,9 +29,9 @@ paddle.enable_static() class TestCommunicatorHalfAsyncEnd2End(unittest.TestCase): def net(self): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = 
paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py index 550fc5db90..cf0babc587 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_communicator_sync.py @@ -27,8 +27,8 @@ import paddle.fluid as fluid class TestCommunicator(unittest.TestCase): def net(self): - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = paddle.mean(cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py index 3e58391ec9..085cb293c0 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_distributed_strategy.py @@ -270,8 +270,8 @@ class TestHalfAsyncStrategy(unittest.TestCase): class TestDebugInfo(unittest.TestCase): def test_debug_info(self): - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py index 0e5ae267f3..84dd2e0ee9 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py @@ -30,12 +30,8 @@ class TestFleetFP16CompressOptimizer(unittest.TestCase): def net(self, main_prog, startup_prog, dtype='float32'): with fluid.program_guard(main_prog, startup_prog): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype=dtype - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' - ) + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype=dtype) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py index aa8bc8bd2d..a36c5a1d74 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_execution_meta_optimizer.py @@ -60,12 +60,10 @@ class 
TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): import paddle.distributed.fleet as fleet fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') @@ -126,12 +124,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): import paddle.distributed.fleet as fleet fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') @@ -204,12 +200,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): import paddle.distributed.fleet as fleet fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') @@ -269,12 +263,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): import paddle.distributed.fleet as fleet fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py index 58091bd847..aab4032afb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_graph_executor.py @@ -45,12 +45,10 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): def node_func(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py index 
2623a222d5..96cf0fffe8 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lamb_meta_optimizer.py @@ -33,11 +33,11 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_y = paddle.static.data( + name="y", shape=[-1, 1], dtype='int64' ) fc_1 = paddle.static.nn.fc( @@ -117,10 +117,8 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): def test_lamb_apply_with_amp(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py index 5096062901..b3094b3b6b 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_lars_meta_optimizer.py @@ -33,11 +33,11 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): def net(self, main_prog, startup_prog): with fluid.program_guard(main_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_y = paddle.static.data( + name="y", shape=[-1, 1], dtype='int64' ) fc_1 = paddle.static.nn.fc( @@ -122,10 +122,8 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): def test_lars_apply_with_amp(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py index 76fb129a04..3551c89a26 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_meta_optimizer_base.py @@ -29,11 +29,11 @@ class TestFleetMetaOptimizerBase(unittest.TestCase): with fluid.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - 
name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_y = paddle.static.data( + name="y", shape=[-1, 1], dtype='int64' ) fc_1 = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py index 8fac45b9d2..ddba633199 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer.py @@ -33,14 +33,12 @@ class TestFleetMetaOptimizer(unittest.TestCase): def net(self): with static.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' - ) - input_z = paddle.fluid.layers.data( - name="z", shape=[1], dtype="float32" + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') + input_z = paddle.static.data( + name="z", shape=[-1, 1], dtype="float32" ) with static.device_guard("gpu:all"): input_z = input_z * 1.0 diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py index 3f22238a36..7c80ad9859 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py @@ -34,12 +34,10 @@ class TestFleetMetaOptimizer(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) with paddle.fluid.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') fc_3 = paddle.static.nn.fc(x=fc_2, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py index 29ef57518d..62b3ba99ea 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_raw_program_meta_optimizer.py @@ -33,10 +33,8 @@ class TestFleetMetaOptimizer(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py index 196e3425ec..7e68784594 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py @@ -443,9 +443,9 @@ class TestGlooWithCloudRoleMaker(unittest.TestCase): os.environ["PADDLE_GLOO_FS_PATH"] = tmp def net(): - x = paddle.fluid.layers.data(name='x', shape=[13], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) - y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y ) diff --git a/python/paddle/fluid/tests/unittests/collective_allgather_op.py b/python/paddle/fluid/tests/unittests/collective_allgather_op.py index e877aaae9b..5fd37ebe93 100644 --- a/python/paddle/fluid/tests/unittests/collective_allgather_op.py +++ b/python/paddle/fluid/tests/unittests/collective_allgather_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofgather", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/collective_reducescatter.py b/python/paddle/fluid/tests/unittests/collective_reducescatter.py index 0852f7b2f3..9813553295 100644 --- a/python/paddle/fluid/tests/unittests/collective_reducescatter.py +++ b/python/paddle/fluid/tests/unittests/collective_reducescatter.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers paddle.enable_static() @@ -29,9 +28,10 @@ class TestCollectiveReduceScatter(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = fluid.layers.collective._c_reducescatter(tindata, nranks) toutdata = fluid.layers.collective._c_sync_comm_stream(toutdata, 0) return toutdata diff --git a/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py b/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py index d89701e6a4..92c9ec3b6e 100644 --- a/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py +++ b/python/paddle/fluid/tests/unittests/collective_reducescatter_op.py @@ -16,7 +16,6 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,9 +29,10 @@ class 
TestCollectiveReduceScatter(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofrs", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py index 70d06a95b6..30bcea4cb5 100644 --- a/python/paddle/fluid/tests/unittests/dist_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/dist_allreduce_op.py @@ -72,8 +72,10 @@ def cnn_model(data): class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, single_device=False): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_ctr.py b/python/paddle/fluid/tests/unittests/dist_ctr.py index 7fd86cadb9..deb4cb921c 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr.py @@ -33,26 +33,23 @@ class TestDistCTR2x2(TestDistRunnerBase): dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta() """ network definition """ - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) # build dnn model diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py index 360cad4340..8e9341f9c5 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py @@ -62,26 +62,23 @@ class TestDistCTR2x2(FleetDistRunnerBase): """ dnn_input_dim, lr_input_dim = int(1e5), int(1e5) - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py index 8d4efa8c3d..3e71a1cb60 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py @@ -49,26 +49,23 @@ class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): dnn_input_dim, lr_input_dim = int(1e5), int(1e5) with fluid.device_guard("cpu"): - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", 
shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="float32", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py index 539446b674..dc0a7022b3 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer.py @@ -74,8 +74,10 @@ def cnn_model(data): class TestFleetMetaOptimizerPrecision(TestDistRunnerBase): def get_model(self, batch_size=2, single_device=False): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py index efad598c72..ac1a4c632f 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_raw_program_optimizer_fuse_allreduce.py @@ -74,8 +74,10 @@ def cnn_model(data): class TestFleetMetaOptimizerFuseAllReducePrecision(TestDistRunnerBase): def get_model(self, batch_size=2, single_device=False): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py index 358b3b5e39..b673bfeae1 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py @@ -93,18 +93,18 @@ def train_network( is_pyreader=False, ): # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) datas = [q, label, pt, nt] diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py index 9e398e83b9..a9a2d7be0b 100644 --- 
a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py @@ -52,26 +52,23 @@ class TestDistCTR2x2(FleetDistRunnerBase): """ dnn_input_dim, lr_input_dim = 10, 10 - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git a/python/paddle/fluid/tests/unittests/dist_mnist.py b/python/paddle/fluid/tests/unittests/dist_mnist.py index 117e178dd9..87eb22dcea 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist.py @@ -73,8 +73,10 @@ def cnn_model(data): class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py index cac46996ed..e38d2176bc 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_batch_merge.py @@ -38,8 +38,10 @@ def test_merge_reader(repeat_batch_size=8): class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py index e4af13fa89..9aa6628542 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_fp16_allreduce.py @@ -32,8 +32,10 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py index dc2fc10450..b1f5e0f2e8 100644 --- a/python/paddle/fluid/tests/unittests/dist_mnist_lars.py +++ b/python/paddle/fluid/tests/unittests/dist_mnist_lars.py @@ -29,8 +29,10 @@ fluid.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): def 
get_model(self, batch_size=2): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 377ad30722..db3318d67d 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -209,10 +209,10 @@ class SE_ResNeXt: class DistSeResneXt2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False): # Input data - image = fluid.layers.data( - name="data", shape=[3, 224, 224], dtype='float32' + image = paddle.static.data( + name="data", shape=[-1, 3, 224, 224], dtype='float32' ) - label = fluid.layers.data(name="int64", shape=[1], dtype='int64') + label = paddle.static.data(name="int64", shape=[-1, 1], dtype='int64') # Train program model = SE_ResNeXt(layers=50) diff --git a/python/paddle/fluid/tests/unittests/dist_sharding_save.py b/python/paddle/fluid/tests/unittests/dist_sharding_save.py index 1c4f49093d..c509b2506b 100755 --- a/python/paddle/fluid/tests/unittests/dist_sharding_save.py +++ b/python/paddle/fluid/tests/unittests/dist_sharding_save.py @@ -38,12 +38,10 @@ def runtime_main(): fleet.init(role) with fluid.program_guard(train_prog, startup_prog): with fluid.unique_name.guard(): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=256, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/dist_text_classification.py b/python/paddle/fluid/tests/unittests/dist_text_classification.py index de8630cf70..a287bd8a6c 100644 --- a/python/paddle/fluid/tests/unittests/dist_text_classification.py +++ b/python/paddle/fluid/tests/unittests/dist_text_classification.py @@ -95,8 +95,8 @@ def conv_net( def inference_network(dict_dim): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) out = conv_net(data, dict_dim) return out @@ -125,10 +125,10 @@ class TestDistTextClassification2x2(TestDistRunnerBase): word_dict, dict_dim = get_worddict(vocab) # Input data - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = conv_net(data, dict_dim) diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index 0db96bee8b..5b0343bd81 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -1512,14 +1512,13 @@ def make_all_inputs(input_fields): """ inputs = [] for input_field in input_fields: - input_var = 
layers.data( + input_var = paddle.static.data( name=input_field, shape=input_descs[input_field][0], dtype=input_descs[input_field][1], lod_level=input_descs[input_field][2] if len(input_descs[input_field]) == 3 else 0, - append_batch_size=False, ) inputs.append(input_var) return inputs diff --git a/python/paddle/fluid/tests/unittests/dist_word2vec.py b/python/paddle/fluid/tests/unittests/dist_word2vec.py index 746a1f0765..e10131667c 100644 --- a/python/paddle/fluid/tests/unittests/dist_word2vec.py +++ b/python/paddle/fluid/tests/unittests/dist_word2vec.py @@ -107,13 +107,21 @@ class TestDistWord2vec2x2(TestDistRunnerBase): word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) - first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') - second_word = fluid.layers.data( - name='secondw', shape=[1], dtype='int64' + first_word = paddle.static.data( + name='firstw', shape=[-1, 1], dtype='int64' + ) + second_word = paddle.static.data( + name='secondw', shape=[-1, 1], dtype='int64' + ) + third_word = paddle.static.data( + name='thirdw', shape=[-1, 1], dtype='int64' + ) + forth_word = paddle.static.data( + name='forthw', shape=[-1, 1], dtype='int64' + ) + next_word = paddle.static.data( + name='nextw', shape=[-1, 1], dtype='int64' ) - third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') - forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') - next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') avg_cost, predict_word = __network__( [first_word, second_word, third_word, forth_word, next_word] ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py index 42c69af5e6..bb3fb57161 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_normal.py @@ -24,7 +24,6 @@ from test_distribution import DistributionNumpy import paddle from paddle import fluid from paddle.distribution import Normal -from paddle.fluid import layers np.random.seed(2022) @@ -117,8 +116,8 @@ class NormalTest(unittest.TestCase): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1], dtype='float32' ) def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6): @@ -237,8 +236,8 @@ class NormalTest3(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -266,8 +265,8 @@ class NormalTest4(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -302,8 +301,8 @@ class NormalTest5(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - 
name='values', shape=[dims], dtype='float64' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float64' ) @@ -334,20 +333,20 @@ class NormalTest6(NormalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data( - name='loc', shape=[dims], dtype='float32' + self.static_loc = paddle.static.data( + name='loc', shape=[-1, dims], dtype='float32' ) - self.static_scale = layers.data( - name='scale', shape=[dims], dtype='float32' + self.static_scale = paddle.static.data( + name='scale', shape=[-1, dims], dtype='float32' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) - self.static_other_loc = layers.data( - name='other_loc', shape=[dims], dtype='float32' + self.static_other_loc = paddle.static.data( + name='other_loc', shape=[-1, dims], dtype='float32' ) - self.static_other_scale = layers.data( - name='other_scale', shape=[dims], dtype='float32' + self.static_other_scale = paddle.static.data( + name='other_scale', shape=[-1, dims], dtype='float32' ) @@ -382,20 +381,20 @@ class NormalTest7(NormalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data( - name='loc', shape=[dims], dtype='float64' + self.static_loc = paddle.static.data( + name='loc', shape=[-1, dims], dtype='float64' ) - self.static_scale = layers.data( - name='scale', shape=[dims], dtype='float64' + self.static_scale = paddle.static.data( + name='scale', shape=[-1, dims], dtype='float64' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float64' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float64' ) - self.static_other_loc = layers.data( - name='other_loc', shape=[dims], dtype='float64' + self.static_other_loc = paddle.static.data( + name='other_loc', shape=[-1, dims], dtype='float64' ) - self.static_other_scale = layers.data( - name='other_scale', shape=[dims], dtype='float64' + self.static_other_scale = paddle.static.data( + name='other_scale', shape=[-1, dims], dtype='float64' ) @@ -430,20 +429,20 @@ class NormalTest8(NormalTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_loc = layers.data( - name='loc', shape=[dims], dtype='float64' + self.static_loc = paddle.static.data( + name='loc', shape=[-1, dims], dtype='float64' ) - self.static_scale = layers.data( - name='scale', shape=[dims], dtype='float64' + self.static_scale = paddle.static.data( + name='scale', shape=[-1, dims], dtype='float64' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) - self.static_other_loc = layers.data( - name='other_loc', shape=[dims], dtype='float64' + self.static_other_loc = paddle.static.data( + name='other_loc', shape=[-1, dims], dtype='float64' ) - self.static_other_scale = layers.data( - name='other_scale', shape=[dims], dtype='float64' + self.static_other_scale = paddle.static.data( + name='other_scale', shape=[-1, dims], dtype='float64' ) @@ -477,8 +476,8 @@ class NormalTest9(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', 
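# Pattern in these distribution-test hunks: the scalar case migrates
# layers.data(name='values', shape=[]) to paddle.static.data(shape=[-1]),
# and the vector cases migrate shape=[dims] to shape=[-1, dims], the same
# explicit-batch-dimension rule applied throughout this patch. A sketch,
# assuming dims == 3:
#     values = paddle.static.data(name='values', shape=[-1, 3], dtype='float32')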
shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -512,8 +511,8 @@ class NormalTest10(NormalTest): self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) diff --git a/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py b/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py index da5500ccef..ca59b5118e 100644 --- a/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py +++ b/python/paddle/fluid/tests/unittests/distribution/test_distribution_uniform.py @@ -20,7 +20,6 @@ from test_distribution import DistributionNumpy import paddle from paddle import fluid from paddle.distribution import Uniform -from paddle.fluid import layers np.random.seed(2022) @@ -88,8 +87,8 @@ class UniformTest(unittest.TestCase): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1], dtype='float32' ) def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6): @@ -170,8 +169,8 @@ class UniformTest3(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -188,8 +187,8 @@ class UniformTest4(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -211,8 +210,8 @@ class UniformTest5(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float64' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float64' ) @@ -232,14 +231,14 @@ class UniformTest6(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data( - name='low', shape=[dims], dtype='float32' + self.static_low = paddle.static.data( + name='low', shape=[-1, dims], dtype='float32' ) - self.static_high = layers.data( - name='high', shape=[dims], dtype='float32' + self.static_high = paddle.static.data( + name='high', shape=[-1, dims], dtype='float32' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -259,14 +258,14 @@ class UniformTest7(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data( - name='low', shape=[dims], dtype='float64' + self.static_low = paddle.static.data( + name='low', shape=[-1, dims], dtype='float64' ) - self.static_high = layers.data( - name='high', shape=[dims], dtype='float64' + 
self.static_high = paddle.static.data( + name='high', shape=[-1, dims], dtype='float64' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float64' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float64' ) @@ -286,14 +285,14 @@ class UniformTest8(UniformTest): def init_static_data(self, batch_size, dims): with fluid.program_guard(self.test_program): - self.static_low = layers.data( - name='low', shape=[dims], dtype='float64' + self.static_low = paddle.static.data( + name='low', shape=[-1, dims], dtype='float64' ) - self.static_high = layers.data( - name='high', shape=[dims], dtype='float64' + self.static_high = paddle.static.data( + name='high', shape=[-1, dims], dtype='float64' ) - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -311,8 +310,8 @@ class UniformTest9(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -333,8 +332,8 @@ class UniformTest10(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) @@ -355,8 +354,8 @@ class UniformTest11(UniformTest): self.static_low = self.low_np self.static_high = self.high_np with fluid.program_guard(self.test_program): - self.static_values = layers.data( - name='values', shape=[dims], dtype='float32' + self.static_values = paddle.static.data( + name='values', shape=[-1, dims], dtype='float32' ) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py index 92bdde2d94..680d8afd48 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/transformer_util.py @@ -296,12 +296,11 @@ class InputField: self.feed_list = [] for slot in input_slots: self.feed_list.append( - fluid.layers.data( + paddle.static.data( name=slot['name'], shape=slot['shape'], dtype=slot['dtype'], lod_level=slot.get('lod_level', 0), - append_batch_size=False, ) ) diff --git a/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py b/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py index 8f803ce1db..917beec752 100644 --- a/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py +++ b/python/paddle/fluid/tests/unittests/fleet_heter_ps_training.py @@ -41,26 +41,23 @@ def net(batch_size=4, lr=0.01): dnn_input_dim, lr_input_dim = int(2), int(2) with fluid.device_guard("cpu"): - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="float32", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git 
a/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py b/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py index 628953391c..e17a9604fc 100755 --- a/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py +++ b/python/paddle/fluid/tests/unittests/fleet_meta_optimizer_base.py @@ -55,11 +55,11 @@ class TestFleetMetaOptimizer(unittest.TestCase): with fluid.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_y = paddle.static.data( + name="y", shape=[-1, 1], dtype='int64' ) fc_1 = paddle.static.nn.fc( @@ -92,11 +92,11 @@ class TestFleetMetaOptimizer(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) with fluid.device_guard("gpu:0"): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data( + name="x", shape=[-1, 32], dtype='float32' ) - input_y = paddle.fluid.layers.data( - name="y", shape=[1], dtype='int64' + input_y = paddle.static.data( + name="y", shape=[-1, 1], dtype='int64' ) for stage_idx in range(pp_degree): diff --git a/python/paddle/fluid/tests/unittests/fleet_ps_training.py b/python/paddle/fluid/tests/unittests/fleet_ps_training.py index be097ad3ac..9b5ccf7c99 100644 --- a/python/paddle/fluid/tests/unittests/fleet_ps_training.py +++ b/python/paddle/fluid/tests/unittests/fleet_ps_training.py @@ -15,14 +15,15 @@ from nets import mlp from utils import gen_data +import paddle import paddle.fluid as fluid from paddle.fluid.incubate.fleet.base import role_maker from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import ( fleet, ) -input_x = fluid.layers.data(name="x", shape=[32], dtype='float32') -input_y = fluid.layers.data(name="y", shape=[1], dtype='int64') +input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') +input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') input_y = fluid.layers.cast(input_y, dtype="float32") with fluid.device_guard("gpu"): diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py index ba5e304b61..8f6c859242 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py @@ -25,23 +25,20 @@ import paddle.fluid.core as core class EmbEltwiseLayerNormFusePassTest(PassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): - word_id = fluid.layers.data( + word_id = paddle.static.data( name="word_id", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) - pos_id = fluid.layers.data( + pos_id = paddle.static.data( name="pos_id", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) - sent_id = fluid.layers.data( + sent_id = paddle.static.data( name="sent_id", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) word_emb = fluid.layers.embedding( input=word_id, size=(128, 768), dtype='float32' @@ -56,29 +53,25 @@ class EmbEltwiseLayerNormFusePassTest(PassTest): add2 = paddle.add(add1, sent_emb) hidden1 = paddle.static.nn.layer_norm(input=add2, begin_norm_axis=2) - id1 = 
fluid.layers.data( + id1 = paddle.static.data( name="id1", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) - id2 = fluid.layers.data( + id2 = paddle.static.data( name="id2", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) - id3 = fluid.layers.data( + id3 = paddle.static.data( name="id3", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) - id4 = fluid.layers.data( + id4 = paddle.static.data( name="id4", shape=[1, 128, 1], dtype="int64", - append_batch_size=False, ) emb1 = fluid.layers.embedding( input=id1, size=(128, 768), dtype='float32' diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py index 21697177d0..6e0bb08c92 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py @@ -28,10 +28,12 @@ paddle.enable_static() class TestQuantizationSubGraph(unittest.TestCase): def build_graph_with_sub_graph(self): def linear_fc(num): - data = fluid.layers.data( - name='image', shape=[1, 32, 32], dtype='float32' + data = paddle.static.data( + name='image', shape=[-1, 1, 32, 32], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = data for _ in range(num): hidden = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py index 9f7b2d8533..ae2870af83 100644 --- a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py +++ b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py @@ -60,11 +60,11 @@ class BuildIrMemOptBase(unittest.TestCase): fluid.default_startup_program().random_seed = 100 fluid.default_main_program().random_seed = 100 - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") cost = network(data, label, len(self.word_dict)) optimizer = fluid.optimizer.Adam(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py index 1d8d11ad11..91c839e9ea 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_api.py @@ -45,8 +45,8 @@ class TestCollectiveAllgatherAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): tensor_list = [] - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) paddle.distributed.all_gather(tensor_list, tindata) return tensor_list diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py index 8040e834d6..33d9f05cf7 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allgather_op.py @@ -42,9 +42,10 @@ class 
TestCollectiveAllgather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) toutdata = main_prog.current_block().create_var( name="outofallgather", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py index 22ca990c55..a636dbb469 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_api.py @@ -44,8 +44,8 @@ class TestCollectiveAllreduceAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) paddle.distributed.all_reduce(tindata) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py index 609d9e7c41..2d9f6ee03e 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py @@ -42,9 +42,11 @@ class TestCollectiveAllreduce(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program, col_type): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) + toutdata = main_prog.current_block().create_var( name="outof" + col_type, dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py index 0c1ae57225..673ffe67a0 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_api.py @@ -44,9 +44,11 @@ class TestCollectiveBroadcastAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype="float32" + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype="float32" ) + tindata.desc.set_need_check_feed(False) + paddle.distributed.broadcast(tindata, src=1) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py index 982d5d204f..ed42a7964e 100755 --- a/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_broadcast_op.py @@ -43,9 +43,11 @@ class TestCollectiveBroadcast(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) + toutdata = main_prog.current_block().create_var( 
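# Note on the set_need_check_feed(False) lines added in these collective
# hunks: paddle.static.data, unlike the removed fluid.layers.data, marks its
# Variable so the executor validates the shape/dtype of fed data. These
# runner tests feed buffers whose leading dimension need not match the
# declared [-1, 10, 1000], so the check is switched off through the block
# desc. desc.set_need_check_feed is an internal detail kept verbatim from
# the patch, not a public API, and this rationale is inferred from the
# surrounding hunks.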
name="outofbroadcast", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py index 33a0d9d0d5..2121aaee2b 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_api.py @@ -44,9 +44,11 @@ class TestCollectiveReduceAPI(TestCollectiveAPIRunnerBase): def get_model(self, main_prog, startup_program, rank): with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) + paddle.distributed.reduce(tindata, dst=0) return [tindata] diff --git a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py index d9271376b0..f389cef9d6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/mlu/collective_reduce_op.py @@ -43,9 +43,11 @@ class TestCollectiveReduce(TestCollectiveRunnerBase): ring_id = 0 rootid = 1 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) + tindata.desc.set_need_check_feed(False) + toutdata = main_prog.current_block().create_var( name="outof" + col_type, dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py index 20b3a00cc9..8a9405c1fc 100644 --- a/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/sync_batch_norm_op_mlu.py @@ -70,11 +70,10 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): use_cudnn = False with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data( + data = paddle.static.data( name='input', shape=self.dshape, dtype=self.dtype, - append_batch_size=False, ) conv = paddle.static.nn.conv2d( input=data, diff --git a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py index a32765b8d3..644a2e81db 100755 --- a/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_accuracy_op_mlu.py @@ -71,16 +71,16 @@ class TestAccuracyOpError(unittest.TestCase): x1 = fluid.create_lod_tensor( np.array([[-1]]), [[1]], fluid.MLUPlace(0) ) - label = fluid.layers.data( + label = paddle.static.data( name='label', shape=[-1, 1], dtype="int32" ) self.assertRaises(TypeError, paddle.static.accuracy, x1, label) self.assertRaises(TypeError, paddle.metric.accuracy, x1, label) # The input dtype of accuracy_op must be float32 or float64. 
- x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32") self.assertRaises(TypeError, paddle.static.accuracy, x2, label) self.assertRaises(TypeError, paddle.metric.accuracy, x2, label) - x3 = fluid.layers.data(name='input', shape=[-1, 2], dtype="float16") + x3 = paddle.static.data(name='input', shape=[-1, 2], dtype="float16") paddle.static.accuracy(input=x3, label=label) paddle.metric.accuracy(input=x3, label=label) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py index 66876ddb79..abd86efcf8 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py @@ -745,7 +745,7 @@ class TestBatchNormOpError(unittest.TestCase): # the input dtype of batch_norm must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32") self.assertRaises(TypeError, paddle.static.nn.batch_norm, x2) @@ -761,7 +761,7 @@ class TestDygraphBatchNormAPIError(unittest.TestCase): # the input dtype of BatchNorm must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32") self.assertRaises(TypeError, batch_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py index f994fb185f..568d03acd4 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_compare_op_mlu.py @@ -44,9 +44,9 @@ def create_test_class(op_type, typename, callback): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - a = fluid.layers.data(name='a', shape=[2], dtype='float32') - b = fluid.layers.data(name='b', shape=[2], dtype='float32') - c = fluid.layers.data(name='c', shape=[2], dtype='int16') + a = paddle.static.data(name='a', shape=[-1, 2], dtype='float32') + b = paddle.static.data(name='b', shape=[-1, 2], dtype='float32') + c = paddle.static.data(name='c', shape=[-1, 2], dtype='int16') d = fluid.create_lod_tensor(np.array([[-1]]), [[1]], self.place) op = eval("fluid.layers.%s" % self.op_type) @@ -134,8 +134,8 @@ def create_test_class(op_type, typename, callback): def test_attr_name(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[4], dtype=typename) - y = fluid.layers.data(name='y', shape=[4], dtype=typename) + x = paddle.static.data(name='x', shape=[-1, 4], dtype=typename) + y = paddle.static.data(name='y', shape=[-1, 4], dtype=typename) op = eval("paddle.%s" % (self.op_type)) out = op(x=x, y=y, name="name_%s" % (self.op_type)) self.assertEqual("name_%s" % (self.op_type) in out.name, True) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py index a24206e090..b4329068c6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_conv2d_transposed_op_mlu.py @@ -493,11 +493,11 @@ class TestConv2DTransposeAPI(unittest.TestCase): self.place = 
paddle.device.MLUPlace(0) def test_case1(self): - data1 = fluid.layers.data( - name='data1', shape=[3, 5, 5], dtype='float32' + data1 = paddle.static.data( + name='data1', shape=[-1, 3, 5, 5], dtype='float32' ) - data2 = fluid.layers.data( - name='data2', shape=[5, 5, 3], dtype='float32' + data2 = paddle.static.data( + name='data2', shape=[-1, 5, 5, 3], dtype='float32' ) out1 = paddle.static.nn.conv2d_transpose( input=data1, @@ -583,7 +583,7 @@ class TestConv2DTransposeOpException(unittest.TestCase): self.place = paddle.device.MLUPlace(0) def test_exception(self): - data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") + data = paddle.static.data(name='data', shape=[-1, 3, 5, 5], dtype="float32") def attr_data_format(): out = paddle.static.nn.conv2d_transpose( @@ -630,8 +630,8 @@ class TestConv2DTransposeOpException(unittest.TestCase): self.assertRaises(ValueError, attr_padding_with_data_format) - error_input = fluid.layers.data( - name='error_data', shape=[1], dtype="float32" + error_input = paddle.static.data( + name='error_data', shape=[-1, 1], dtype="float32" ) def error_input_size(): diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py index 0a88f75983..5d7b399256 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_as_v2_op_mlu.py @@ -137,14 +137,13 @@ class TestExpandAsV2API(unittest.TestCase): def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + x = paddle.static.data( + name='x', shape=[12, 14], dtype="float32" ) - y = fluid.layers.data( + y = paddle.static.data( name='target_tensor', shape=[2, 12, 14], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py index 46d3d9d214..c2a3dbfa20 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_expand_v2_op_mlu.py @@ -220,9 +220,9 @@ class TestExpandV2Error(unittest.TestCase): ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) - x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.tensor.expand, x2, shape) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.expand, x3, shape) @@ -231,15 +231,14 @@ class TestExpandV2Error(unittest.TestCase): class TestExpandV2API(unittest.TestCase): def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + x = paddle.static.data( + name='x', shape=[12, 14], dtype="float32" ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data( + expand_shape = paddle.static.data( name="expand_shape", shape=[2], - append_batch_size=False, dtype="int32", ) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py index 
7a19779b04..6d67b6fc8f 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_fill_constant_op_mlu.py @@ -391,7 +391,7 @@ class TestFillConstantOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # for ci coverage - x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16") + x1 = paddle.static.data(name='x1', shape=[-1, 1], dtype="int16") self.assertRaises( TypeError, fluid.layers.fill_constant, @@ -411,7 +411,7 @@ class TestFillConstantOpError(unittest.TestCase): # The argument dtype of fill_constant_op must be one of bool, float16, # float32, float64, uint8, int16, int32 or int64 - x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 1], dtype="int32") self.assertRaises( TypeError, diff --git a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py index 6e095c9b85..e9d2489285 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py @@ -245,11 +245,10 @@ class TestLayerNormOp(unittest.TestCase): class TestLayerNormAPI(unittest.TestCase): def test_case(self): - x = fluid.layers.data( + x = paddle.static.data( name='x', shape=[64, 32, 256], dtype='float32', - append_batch_size=False, ) x = paddle.static.nn.layer_norm( x, @@ -291,7 +290,7 @@ class TestDygraphLayerNormAPIError(unittest.TestCase): self.assertRaises(TypeError, layer_norm, x1) # the input dtype of LayerNorm must be float32 or float16 - x2 = fluid.layers.data(name='x2', shape=[3, 32, 32], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 32, 32], dtype="int32") self.assertRaises(TypeError, layer_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py index 11df0949a6..c167ac67cc 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py @@ -70,11 +70,11 @@ class TestLog(TestActivation): self.outputs = {'Out': out} def test_error(self): - in1 = fluid.layers.data( - name="in1", shape=[11, 17], append_batch_size=False, dtype="int32" + in1 = paddle.static.data( + name="in1", shape=[11, 17], dtype="int32" ) - in2 = fluid.layers.data( - name="in2", shape=[11, 17], append_batch_size=False, dtype="int64" + in2 = paddle.static.data( + name="in2", shape=[-1, 11, 17], dtype="int64" ) self.assertRaises(TypeError, fluid.layers.log, in1) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py index 5e5e517878..3a757e9cb6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_momentum_op_mlu.py @@ -140,8 +140,8 @@ class TestMomentumV2(unittest.TestCase): place = fluid.MLUPlace(0) main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost =paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -265,8 +265,8 @@ class 
TestMomentumOpWithDecayAPI(unittest.TestCase): place = fluid.MLUPlace(0) main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost =paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py index b28d191fa2..dfe989fa83 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_one_hot_v2_op_mlu.py @@ -150,8 +150,8 @@ class TestOneHotOp_exception(unittest.TestCase): def test_check_output(self): program = Program() with program_guard(program): - x = fluid.layers.data( - name='x', shape=[self.dimension], dtype='float32', lod_level=1 + x = paddle.static.data( + name='x', shape=[-1, self.dimension], dtype='float32', lod_level=1 ) block = program.current_block() one_hot_out = block.create_var( @@ -207,7 +207,7 @@ class TestOneHotOpApi(unittest.TestCase): # paddle.to_tensor(label), depth) def _run(self, depth): - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) label_data = np.array( @@ -234,10 +234,9 @@ class BadInputTestOnehotV2(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data( + label = paddle.static.data( name="label", shape=[4], - append_batch_size=False, dtype="float32", ) one_hot_label = fluid.one_hot(input=label, depth=4) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py index 139d347e0d..4e4f0c5661 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py @@ -705,31 +705,27 @@ class TestPool2DAPI(unittest.TestCase): x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32") x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32") - input_NHWC = fluid.layers.data( + input_NHWC = paddle.static.data( name="input_NHWC", shape=[2, 5, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCHW = fluid.layers.data( + input_NCHW = paddle.static.data( name="input_NCHW", shape=[2, 3, 5, 5], - append_batch_size=False, dtype="float32", ) - input_NHWC_negetive = fluid.layers.data( + input_NHWC_negetive = paddle.static.data( name="input_NHWC_negetive", shape=[2, -1, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCHW_negetive = fluid.layers.data( + input_NCHW_negetive = paddle.static.data( name="input_NCHW_negetive", shape=[2, 3, -1, -1], - append_batch_size=False, dtype="float32", ) @@ -930,10 +926,9 @@ class TestPool2DAPI(unittest.TestCase): class TestPool2DAPI_Error(unittest.TestCase): def test_api(self): - input_NHWC = fluid.layers.data( + input_NHWC = paddle.static.data( name="input_NHWC", shape=[2, 5, 5, 3], - append_batch_size=False, dtype="float32", ) ksize = [3, 3] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py index 072b985613..83d18b6673 100644 --- 
a/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_split_op_mlu.py @@ -94,7 +94,7 @@ class TestCase5(TestCase1): class API_TestSplit(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') + data = paddle.static.data('data', shape=[-1, 10], dtype='float32') x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1) place = fluid.MLUPlace(0) exe = fluid.Executor(place) @@ -108,7 +108,7 @@ class API_TestSplit(unittest.TestCase): class API_TestSplit2(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') + data = paddle.static.data('data', shape=[-1, 10], dtype='float32') x0, x1 = paddle.split(data, num_or_sections=2, axis=1) place = fluid.MLUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py index eefe1d7d69..8bbd9e7789 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_stack_op_mlu.py @@ -120,9 +120,9 @@ class TestStackOpHalf(TestStackOpBase): class API_test(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float32') - data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float32') - data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float32') + data1 = paddle.static.data('data1', shape=[-1, 1, 2], dtype='float32') + data2 = paddle.static.data('data2', shape=[-1, 1, 2], dtype='float32') + data3 = paddle.static.data('data3', shape=[-1, 1, 2], dtype='float32') result_stack = paddle.stack([data1, data2, data3], axis=0) place = paddle.MLUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py index 3506948505..07a9179c5a 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_strided_slice_op_mlu.py @@ -527,20 +527,19 @@ class TestStridedSliceAPI(unittest.TestCase): input = np.random.random([3, 4, 5, 6]).astype("float32") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data( - name='starts', shape=[3], dtype='int32', append_batch_size=False + starts = paddle.static.data( + name='starts', shape=[3], dtype='int32' ) - ends = fluid.layers.data( - name='ends', shape=[3], dtype='int32', append_batch_size=False + ends = paddle.static.data( + name='ends', shape=[3], dtype='int32' ) - strides = fluid.layers.data( - name='strides', shape=[3], dtype='int32', append_batch_size=False + strides = paddle.static.data( + name='strides', shape=[3], dtype='int32' ) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[3, 4, 5, 6], - append_batch_size=False, dtype="float32", ) out_1 = paddle.strided_slice( diff --git a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py index d9ec53af9b..eb048e1ee6 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py +++ 
b/python/paddle/fluid/tests/unittests/mlu/test_sync_batch_norm_op_mlu_extra.py @@ -46,7 +46,7 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): self.assertRaises(TypeError, my_sync_batch_norm, x1) # the input dtype of SyncBatchNorm must be float16 or float32 - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32") self.assertRaises(TypeError, my_sync_batch_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py index 2f1a0d990b..c3f4b8a755 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_tile_op_mlu.py @@ -221,9 +221,9 @@ class TestTileError(unittest.TestCase): ) repeat_times = [2, 2] self.assertRaises(TypeError, paddle.tile, x1, repeat_times) - x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.tile, x2, repeat_times) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tile, x3, repeat_times) @@ -232,7 +232,7 @@ class TestTileAPIStatic(unittest.TestCase): def test_api(self): with program_guard(Program(), Program()): repeat_times = [2, 2] - x1 = fluid.layers.data(name='x1', shape=[4], dtype="int32") + x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32") out = paddle.tile(x1, repeat_times) positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2) out2 = paddle.tile(x1, repeat_times=[positive_2, 2]) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py index a802f9da21..60cd1e27c7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py @@ -188,7 +188,7 @@ class TestTransposeOpError(unittest.TestCase): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 10, 5, 3], dtype='float32') def test_x_Variable_check(): # the Input(x)'s type must be Variable diff --git a/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py index 877207eab7..a12c4427a7 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_where_index_op_mlu.py @@ -107,7 +107,7 @@ class TestRank3(TestWhereIndexOp): class TestWhereOpError(unittest.TestCase): def test_api(self): with program_guard(Program(), Program()): - cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') + cond = paddle.static.data(name='cond', shape=[-1, 4], dtype='bool') result = paddle.nonzero(cond) exe = fluid.Executor(paddle.device.MLUPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py index a2d0c1a699..a2fc6e2073 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_where_op_mlu.py @@ -90,14 +90,14 @@ class TestWhereAPI(unittest.TestCase): for x_stop_gradient in [False, True]: 
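# In the where-op hunks here, shapes are assembled as [-1] + self.shape:
# fluid.layers.data declared shape=self.shape and prepended the batch
# dimension itself, so the migrated calls splice the variable batch
# dimension in explicitly. A sketch, assuming self.shape == [4, 2]:
#     cond = paddle.static.data(name='cond', shape=[-1] + [4, 2], dtype='bool')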
for y_stop_gradient in [False, True]: with fluid.program_guard(Program(), Program()): - cond = fluid.layers.data( - name='cond', shape=self.shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + self.shape, dtype='bool' ) - x = fluid.layers.data( - name='x', shape=self.shape, dtype='float32' + x = paddle.static.data( + name='x', shape=[-1] + self.shape, dtype='float32' ) - y = fluid.layers.data( - name='y', shape=self.shape, dtype='float32' + y = paddle.static.data( + name='y', shape=[-1] + self.shape, dtype='float32' ) x.stop_gradient = x_stop_gradient y.stop_gradient = y_stop_gradient @@ -137,8 +137,8 @@ class TestWhereAPI(unittest.TestCase): def test_api_broadcast(self, use_mlu=False): main_program = Program() with fluid.program_guard(main_program): - x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') - y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32') y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( 'float32' @@ -161,8 +161,8 @@ class TestWhereAPI(unittest.TestCase): main_program = Program() with fluid.program_guard(main_program): cond_shape = [2, 4] - cond = fluid.layers.data( - name='cond', shape=cond_shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + cond_shape, dtype='bool' ) x_data = 1.0 y_data = 2.0 @@ -185,11 +185,11 @@ class TestWhereAPI(unittest.TestCase): paddle.enable_static() main_program = Program() with fluid.program_guard(main_program): - cond = fluid.layers.data( - name='cond', shape=cond_shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + cond_shape, dtype='bool' ) - x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') - y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') + x = paddle.static.data(name='x', shape=[-1] + x_shape, dtype='float32') + y = paddle.static.data(name='y', shape=[-1] + y_shape, dtype='float32') cond_data_tmp = np.random.random(size=cond_shape).astype('float32') cond_data = cond_data_tmp < 0.3 x_data = np.random.random(size=x_shape).astype('float32') @@ -340,7 +340,7 @@ class TestWhereDygraphAPI(unittest.TestCase): def test_where_condition(self): data = np.array([[True, False], [False, True]]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[(-1), 2]) + x = paddle.static.data(name='x', shape=[(-1), 2]) y = paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 2) @@ -353,7 +353,7 @@ class TestWhereDygraphAPI(unittest.TestCase): np.testing.assert_allclose(expect_out, np.array(res)) data = np.array([True, True, False]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[(-1)]) + x = paddle.static.data(name='x', shape=[(-1)]) y = paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 1) @@ -379,9 +379,9 @@ class TestWhereOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) def test_type(): - x = fluid.layers.data(name='x', shape=[4], dtype='bool') - y = fluid.layers.data(name='y', shape=[4], dtype='float16') - cond = fluid.layers.data(name='cond', shape=[4], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 4], dtype='bool') + y = paddle.static.data(name='y', shape=[-1, 4], dtype='float16') + cond = paddle.static.data(name='cond', shape=[-1, 4], dtype='int32') paddle.where(cond, x, 
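# paddle.where(cond, x, y) selects elementwise from x where cond is True
# and from y otherwise; test_type expects this call to raise TypeError
# because cond is declared int32 rather than the required bool (an
# inference from the assertRaises that follows).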
y) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py b/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py index f8f8612ca8..c0a871e51f 100644 --- a/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/collective_identity_op_npu.py @@ -42,8 +42,8 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) toutdata = main_prog.current_block().create_var( name="outofgather", diff --git a/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py index 72c822ad3a..0d0b469881 100644 --- a/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/sync_batch_norm_op_npu.py @@ -74,11 +74,10 @@ class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase): use_cudnn = False with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data( + data = paddle.static.data( name='input', - shape=self.dshape, + shape=[-1] + self.dshape, dtype=self.dtype, - append_batch_size=False, ) conv = paddle.static.nn.conv2d( input=data, diff --git a/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py index fe22453f4f..c6fa750a17 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_atan_op_npu.py @@ -52,7 +52,7 @@ class TestAtan(OpTest): def test_out_name(self): with fluid.program_guard(fluid.Program()): np_x = np.array([0.1]) - data = fluid.layers.data(name="X", shape=[1]) + data = paddle.static.data(name="X", shape=[1]) out = paddle.atan(data, name='Y') place = paddle.NPUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py index 122429a7f8..37dcf8465b 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_clip_op_npu.py @@ -122,7 +122,7 @@ class TestClipOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) def test_dtype(): - x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32') + x2 = paddle.static.data(name='x2', shape=[-1, 1], dtype='int32') paddle.clip(x=x2, min=-1.0, max=1.0) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py index 4fd0be14c1..6caa442fc0 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_compare_op_npu.py @@ -44,9 +44,9 @@ def create_test_class(op_type, typename, callback): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - a = fluid.layers.data(name='a', shape=[2], dtype='float32') - b = fluid.layers.data(name='b', shape=[2], dtype='float32') - c = fluid.layers.data(name='c', shape=[2], dtype='int16') + a = paddle.static.data(name='a', shape=[-1, 2], dtype='float32') + b = paddle.static.data(name='b', shape=[-1, 2], dtype='float32') + c = 
paddle.static.data(name='c', shape=[-1, 2], dtype='int16') d = fluid.create_lod_tensor(np.array([[-1]]), [[1]], self.place) op = eval("fluid.layers.%s" % self.op_type) @@ -134,8 +134,8 @@ def create_test_class(op_type, typename, callback): def test_attr_name(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[4], dtype=typename) - y = fluid.layers.data(name='y', shape=[4], dtype=typename) + x = paddle.static.data(name='x', shape=[-1, 4], dtype=typename) + y = paddle.static.data(name='y', shape=[-1, 4], dtype=typename) op = eval("paddle.%s" % (self.op_type)) out = op(x=x, y=y, name="name_%s" % (self.op_type)) self.assertEqual("name_%s" % (self.op_type) in out.name, True) diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py index aae34ebfb5..b2f966629d 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv2d_transpose_op_npu.py @@ -429,11 +429,11 @@ class TestWithEvenUpsample_NHWC_output_padding_FP16( class TestConv2DTransposeAPI(unittest.TestCase): def test_case1(self): - data1 = fluid.layers.data( - name='data1', shape=[3, 5, 5], dtype='float32' + data1 = paddle.static.data( + name='data1', shape=[-1, 3, 5, 5], dtype='float32' ) - data2 = fluid.layers.data( - name='data2', shape=[5, 5, 3], dtype='float32' + data2 = paddle.static.data( + name='data2', shape=[-1, 5, 5, 3], dtype='float32' ) out1 = paddle.static.nn.conv2d_transpose( input=data1, diff --git a/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py index 65ec9a489f..cdb7def1c2 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_conv3d_op_npu.py @@ -350,17 +350,15 @@ class TestCase1_AsyPadding(TestConv3DOp_2): class TestConv3DAPI(unittest.TestCase): def test_api(self): - input_NDHWC = fluid.layers.data( + input_NDHWC = paddle.static.data( name="input_NDHWC", shape=[2, 5, 5, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCDHW = fluid.layers.data( + input_NCDHW = paddle.static.data( name="input_NCDHW", shape=[2, 3, 5, 5, 3], - append_batch_size=False, dtype="float32", ) @@ -433,10 +431,9 @@ class TestConv3DAPI(unittest.TestCase): class TestConv3DAPI_Error(unittest.TestCase): def test_api(self): - input = fluid.layers.data( + input = paddle.static.data( name="input", shape=[2, 5, 5, 5, 4], - append_batch_size=False, dtype="float32", ) @@ -519,10 +516,9 @@ class TestConv3DAPI_Error(unittest.TestCase): self.assertRaises(ValueError, run_5) # ValueError: channel dimension - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[2, 5, 5, 5, -1], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py index ca2b0195b4..9625f72d8c 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_expand_as_v2_op_npu.py @@ -122,14 +122,13 @@ class TestExpandAsV2API(unittest.TestCase): def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" + x =
paddle.static.data( + name='x', shape=[12, 14], dtype="float32" ) - y = fluid.layers.data( + y = paddle.static.data( name='target_tensor', shape=[2, 12, 14], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py index 0ea52f04d9..2176f67047 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_expand_v2_op_npu.py @@ -252,9 +252,9 @@ class TestExpandV2Error(unittest.TestCase): ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) - x2 = fluid.layers.data(name='x2', shape=[2], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 2], dtype="uint8") self.assertRaises(TypeError, paddle.tensor.expand, x2, shape) - x3 = fluid.layers.data(name='x3', shape=[2], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 2], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.expand, x3, shape) @@ -264,18 +264,16 @@ class TestExpandV2API(unittest.TestCase): def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( + x = paddle.static.data( name='x', shape=[12, 14], - append_batch_size=False, dtype="float32", ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data( + expand_shape = paddle.static.data( name="expand_shape", shape=[2], - append_batch_size=False, dtype="int32", ) diff --git a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py index a6fa001076..3ce9042d75 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_gather_op_npu.py @@ -84,8 +84,8 @@ class TestCase1(TestGatherOp): class API_TestGather(unittest.TestCase): def test_out1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float32') - index = fluid.layers.data('index', shape=[-1, 1], dtype='int32') + data1 = paddle.static.data('data1', shape=[-1, 2], dtype='float32') + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') out = paddle.gather(data1, index) place = paddle.NPUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py index 7f95e2b55c..56430ee7c1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_group_norm_op_npu.py @@ -57,8 +57,8 @@ class TestGroupNormOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data( - name='x2', shape=[2, 100, 3, 5], dtype='int32' + x2 = paddle.static.data( + name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32' ) groups = 2 paddle.static.nn.group_norm(x2, groups) diff --git a/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py index b500b44e7e..440f681b01 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_momentum_op_npu.py @@ -108,8 +108,8 @@ class TestMomentumV2(unittest.TestCase): place = fluid.NPUPlace(0) main = fluid.Program() with 
fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) @@ -236,8 +236,8 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase): place = fluid.NPUPlace(0) main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py index 4ccd33134b..d50c6dea78 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_one_hot_v2_op_npu.py @@ -224,7 +224,7 @@ class TestOneHotOpApi(unittest.TestCase): ) def _run(self, depth): - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) place = fluid.NPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py index 971184956e..b441efd013 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sin_op_npu.py @@ -55,7 +55,7 @@ def test_class(op_type, typename): def test_out_name(self): with fluid.program_guard(fluid.Program()): np_x = np.array([0.1]) - data = fluid.layers.data(name="X", shape=[1]) + data = paddle.static.data(name="X", shape=[-1, 1]) out = eval("paddle.%s(data, name='Y')" % self.op_type) place = fluid.NPUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py index d6a6cf9d57..38cc028760 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_split_op_npu.py @@ -100,7 +100,7 @@ class TestCase5(TestCase1): class API_TestSplit(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') + data = paddle.static.data('data', shape=[-1, 10], dtype='float32') x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1) place = fluid.NPUPlace(0) exe = fluid.Executor(place) @@ -114,7 +114,7 @@ class API_TestSplit2(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float32') + data = paddle.static.data('data', shape=[-1, 10], dtype='float32') x0, x1 = paddle.split(data, num_or_sections=2, axis=1) place = fluid.NPUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py
b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py index d14b84b645..1e185195d1 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_stack_op_npu.py @@ -196,9 +196,9 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): class API_test(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float32') - data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float32') - data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float32') + data1 = paddle.static.data('data1', shape=[-1, 1, 2], dtype='float32') + data2 = paddle.static.data('data2', shape=[-1, 1, 2], dtype='float32') + data3 = paddle.static.data('data3', shape=[-1, 1, 2], dtype='float32') result_stack = paddle.stack([data1, data2, data3], axis=0) place = paddle.NPUPlace(0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py index c4470b101d..1bee4627e1 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_strided_slice_op_npu.py @@ -580,20 +580,19 @@ class TestStridedSliceAPI(unittest.TestCase): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data( - name='starts', shape=[3], dtype='int32', append_batch_size=False + starts = paddle.static.data( + name='starts', shape=[3], dtype='int32' ) - ends = fluid.layers.data( - name='ends', shape=[3], dtype='int32', append_batch_size=False + ends = paddle.static.data( + name='ends', shape=[3], dtype='int32' ) - strides = fluid.layers.data( - name='strides', shape=[3], dtype='int32', append_batch_size=False + strides = paddle.static.data( + name='strides', shape=[3], dtype='int32' ) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[3, 4, 5, 6], - append_batch_size=False, dtype="float64", ) out_1 = paddle.strided_slice( diff --git a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py index 5c8d00b8fc..9cd59bb8a5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py +++ b/python/paddle/fluid/tests/unittests/npu/test_sync_batch_norm_op_npu_extra.py @@ -46,7 +46,7 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): # the input dtype of SyncBatchNorm must be float16 or float32 # float16 only can be set on GPU place and NPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32") self.assertRaises(TypeError, my_sync_batch_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py index 315f475a5c..a2bb351d65 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_index_npu.py @@ -97,7 +97,7 @@ class TestRank3(TestWhereIndexOp): class TestWhereOpError(unittest.TestCase): def test_api(self): with program_guard(Program(), Program()): - cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') + cond = 
paddle.static.data(name='cond', shape=[-1, 4], dtype='bool') result = paddle.nonzero(cond) exe = fluid.Executor(paddle.NPUPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py index 9e1126d0aa..5e9baa696f 100755 --- a/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_where_op_npu.py @@ -133,8 +133,8 @@ class TestNPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') - y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( "float32" diff --git a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py index 8314f836c7..a8bcfeae68 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py @@ -27,14 +27,14 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): def simple_net(self): - d0 = layers.data( - "d0", shape=[10], append_batch_size=False, dtype='float32' + d0 = paddle.static.data( + "d0", shape=[10], dtype='float32' ) - d1 = layers.data( - "d1", shape=[10], append_batch_size=False, dtype='float32' + d1 = paddle.static.data( + "d1", shape=[10], dtype='float32' ) - d2 = layers.data( - "d2", shape=[10], append_batch_size=False, dtype='float32' + d2 = paddle.static.data( + "d2", shape=[10], dtype='float32' ) # fill_constant npu op doesn't support int64 i = layers.zeros(shape=[1], dtype='int32') diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py index cb262f9495..bd18a20525 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py @@ -89,7 +89,9 @@ class TestSequenceConcatOpError(unittest.TestCase): def test_errors(self): def test_input_list(): # the input type must be list - x_data = fluid.layers.data(name='x', shape=[4], dtype='float32') + x_data = paddle.static.data( + name='x', shape=[-1, 4], dtype='float32' + ) fluid.layers.sequence_concat(input=x_data) self.assertRaises(TypeError, test_input_list) @@ -97,12 +99,16 @@ class TestSequenceConcatOpError(unittest.TestCase): def test_variable1(): # the input element type must be Variable x1_data = np.array([[3, 5]]).astype('float32') - y1_data = fluid.layers.data(name='y1', shape=[4], dtype='float32') + y1_data = paddle.static.data( + name='y1', shape=[-1, 4], dtype='float32' + ) fluid.layers.sequence_concat(input=[x1_data, y1_data]) def test_variable2(): x2_data = np.array([[3, 5]]).astype('float32') - y2_data = fluid.layers.data(name='y2', shape=[4], dtype='float32') + y2_data = paddle.static.data( + name='y2', shape=[-1, 4], dtype='float32' + ) fluid.layers.sequence_concat(input=[y2_data, x2_data]) for i in range(2): @@ -113,8 +119,12 @@ class TestSequenceConcatOpError(unittest.TestCase): def test_dtype(): # dtype must be 'float32', 'float64', 'int64' - x3_data = fluid.layers.data(name="x3", shape=[3, 5], 
dtype='int32') - y3_data = fluid.layers.data(name="y3", shape=[3, 5], dtype='int16') + x3_data = paddle.static.data( + name="x3", shape=[-1, 3, 5], dtype='int32' + ) + y3_data = paddle.static.data( + name="y3", shape=[-1, 3, 5], dtype='int16' + ) input_list = [x3_data, y3_data] fluid.layers.sequence_concat(input=input_list) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py index ec296d969c..2376716b7c 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py @@ -18,6 +18,8 @@ import unittest import numpy as np +import paddle + sys.path.append("../") from op_test import OpTest @@ -283,7 +285,7 @@ class TestSeqConvApi(unittest.TestCase): def test_api(self): import paddle.fluid as fluid - x = fluid.layers.data('x', shape=[32], lod_level=1) + x = paddle.static.data('x', shape=[-1, 32], lod_level=1) y = fluid.layers.sequence_conv( input=x, num_filters=2, filter_size=3, padding_start=None ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py index 85e225c896..704462b040 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_first_step.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.framework import Program, program_guard @@ -35,10 +36,9 @@ class TestSequenceFirstStepOpError(unittest.TestCase): def test_input_dtype(): # the dtype of input must be int64 - type_data = fluid.layers.data( + type_data = paddle.static.data( name='type_data', shape=[7, 1], - append_batch_size=False, dtype='int64', lod_level=1, ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py index e269b46517..165f941f58 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py @@ -16,6 +16,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid from paddle.fluid.framework import Program, program_guard @@ -35,10 +36,9 @@ class TestSequenceLastStepOpError(unittest.TestCase): def test_input_dtype(): # the dtype of input must be int64 - type_data = fluid.layers.data( + type_data = paddle.static.data( name='type_data', shape=[7, 1], - append_batch_size=False, dtype='int64', lod_level=1, ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py index a3dd8373d9..421b4e9689 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py @@ -17,6 +17,8 @@ import unittest import numpy as np +import paddle + sys.path.append("../") from op_test import OpTest @@ -162,8 +164,8 @@ class TestSequencePadOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_variable) def test_pad_value_variable(): - x1 = fluid.layers.data( - name='x1', shape=[10, 5], dtype='float32', lod_level=1 + x1 = paddle.static.data( + name='x1', shape=[-1, 10, 5], dtype='float32', lod_level=1 ) pad_value1 = np.array([0.0], dtype=np.float32) fluid.layers.sequence_pad(x=x1, 
pad_value=pad_value1) @@ -171,8 +173,8 @@ class TestSequencePadOpError(unittest.TestCase): self.assertRaises(TypeError, test_pad_value_variable) def test_dtype(): - x2 = fluid.layers.data( - name='x2', shape=[10, 5], dtype='int16', lod_level=1 + x2 = paddle.static.data( + name='x2', shape=[-1, 10, 5], dtype='int16', lod_level=1 ) pad_value2 = fluid.layers.assign( input=np.array([0.0], dtype=np.int32) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py index 5cda07af7d..9f90325150 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py @@ -17,6 +17,8 @@ import unittest import numpy as np +import paddle + sys.path.append("../") from op_test import OpTest @@ -93,10 +95,9 @@ class TestSequenceReshapeOpError(unittest.TestCase): self.assertRaises(TypeError, test_variable) def test_dtype(): - x1 = fluid.layers.data( + x1 = paddle.static.data( name='x1', - shape=[2, 6], - append_batch_size=False, + shape=[-1, 2, 6], dtype='float16', lod_level=1, ) diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py index 13897df00a..f141806aac 100644 --- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py +++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py @@ -17,6 +17,7 @@ import unittest import numpy as np +import paddle import paddle.fluid as fluid sys.path.append("../") @@ -106,7 +107,9 @@ class TestSequenceReverseOpError(unittest.TestCase): def test_dtype(): # dtype must be 'float32', 'float64', 'int8', 'int32', 'int64' - x2_data = fluid.layers.data(name='x2', shape=[4], dtype='float16') + x2_data = paddle.static.data( + name='x2', shape=[-1, 4], dtype='float16' + ) fluid.layers.sequence_reverse(x=x2_data) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/seresnext_net.py b/python/paddle/fluid/tests/unittests/seresnext_net.py index beb24de946..6e1ad62dc1 100644 --- a/python/paddle/fluid/tests/unittests/seresnext_net.py +++ b/python/paddle/fluid/tests/unittests/seresnext_net.py @@ -131,8 +131,10 @@ img_shape = [3, 224, 224] def SE_ResNeXt50Small(use_feed): - img = fluid.layers.data(name='image', shape=img_shape, dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data( + name='image', shape=[-1] + img_shape, dtype='float32' + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') conv = conv_bn_layer( input=img, num_filters=16, filter_size=3, stride=2, act='relu' diff --git a/python/paddle/fluid/tests/unittests/simple_nets.py b/python/paddle/fluid/tests/unittests/simple_nets.py index 2b41107061..a3ff2b6865 100644 --- a/python/paddle/fluid/tests/unittests/simple_nets.py +++ b/python/paddle/fluid/tests/unittests/simple_nets.py @@ -40,8 +40,8 @@ def simple_fc_net_with_inputs(img, label, class_num=10): def simple_fc_net(use_feed=None): - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') return simple_fc_net_with_inputs(img, label, class_num=10) @@ -70,8 +70,8 @@ def batchnorm_fc_with_inputs(img, label, class_num=10): def 
fc_with_batchnorm(use_feed=None): - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') return batchnorm_fc_with_inputs(img, label, class_num=10) @@ -89,10 +89,10 @@ def bow_net( This model is from https://github.com/PaddlePaddle/models: fluid/PaddleNLP/text_classification/nets.py """ - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") emb = fluid.layers.embedding( input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] ) diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py index 446240e9fb..431d8b24bc 100755 --- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py +++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py @@ -66,16 +66,18 @@ class TestAccuracyOpError(unittest.TestCase): x1 = fluid.create_lod_tensor( np.array([[-1]]), [[1]], fluid.CPUPlace() ) - label = fluid.layers.data( + label = paddle.static.data( name='label', shape=[-1, 1], dtype="int32" ) self.assertRaises(TypeError, paddle.static.accuracy, x1, label) self.assertRaises(TypeError, paddle.metric.accuracy, x1, label) # The input dtype of accuracy_op must be float32 or float64. - x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32") self.assertRaises(TypeError, paddle.static.accuracy, x2, label) self.assertRaises(TypeError, paddle.metric.accuracy, x2, label) - x3 = fluid.layers.data(name='input', shape=[-1, 2], dtype="float16") + x3 = paddle.static.data( + name='input', shape=[-1, 2], dtype="float16" + ) paddle.static.accuracy(input=x3, label=label) paddle.metric.accuracy(input=x3, label=label) diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py index 48349cfe91..8333da1acc 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers import paddle.nn.functional as F @@ -31,7 +30,7 @@ class TestSigmoidTripleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = F.sigmoid(x) x_arr = np.random.random(shape).astype(dtype) @@ -58,7 +57,7 @@ class TestSigmoidDoubleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = F.sigmoid(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -88,7 +87,7 @@ class TestTanhTripleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.tanh(x) x_arr = 
np.random.random(shape).astype(dtype) @@ -118,7 +117,7 @@ class TestTanhDoubleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.tanh(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -148,7 +147,7 @@ class TestAbsDoubleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.abs(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -176,7 +175,7 @@ class TestReluDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = F.relu(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -206,7 +205,7 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase): alpha = 0.2 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.nn.functional.leaky_relu(x, alpha) @@ -241,7 +240,7 @@ class TestELUDoubleGradCheck(unittest.TestCase): dtype = np.float64 SEED = 0 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.nn.functional.elu(x, alpha=alpha) @@ -275,7 +274,7 @@ class TestCELUDoubleGradCheck(unittest.TestCase): dtype = np.float64 SEED = 0 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = F.celu(x, alpha=alpha) @@ -307,7 +306,7 @@ class TestSqrtDoubleGradCheck(unittest.TestCase): eps = 0.0001 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.sqrt(x) @@ -339,7 +338,7 @@ class TestRsqrtDoubleGradCheck(unittest.TestCase): eps = 0.0001 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.rsqrt(x) @@ -372,7 +371,7 @@ class TestSquareDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.square(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -403,7 +402,7 @@ class TestLogDoubleGradCheck(unittest.TestCase): eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) x.persistable = True y = paddle.log(x) @@ -434,7 +433,7 @@ class TestSinDoubleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.sin(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -464,7 +463,7 @@ class TestCosDoubleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.cos(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -494,7 +493,7 @@ class TestPowDoubleGradCheck1(unittest.TestCase): shape = [2, 3, 7, 9] eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = 
paddle.pow(x, 2) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -523,7 +522,7 @@ class TestPowDoubleGradCheck2(unittest.TestCase): shape = [2, 3, 7, 9] eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.pow(x, 1) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -552,7 +551,7 @@ class TestSinTripleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.sin(x) x_arr = np.random.random(shape).astype(dtype) @@ -582,7 +581,7 @@ class TestPowTripleGradCheck1(unittest.TestCase): shape = [2, 3, 7, 9] eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.pow(x, 1) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -611,7 +610,7 @@ class TestPowTripleGradCheck2(unittest.TestCase): shape = [2, 3, 7, 9] eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.pow(x, 2) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -640,7 +639,7 @@ class TestPowTripleGradCheck3(unittest.TestCase): shape = [2, 3, 7, 9] eps = 1e-6 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.pow(x, 4) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -669,7 +668,7 @@ class TestCosTripleGradCheck(unittest.TestCase): shape = [2, 3, 7, 9] eps = 0.0005 dtype = np.float64 - x = layers.data('x', shape, False, dtype=dtype) + x = paddle.static.data('x', shape, dtype=dtype) x.persistable = True y = paddle.cos(x) x_arr = np.random.random(shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 2f0c594746..1f0a49cbb3 100755 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -34,13 +34,13 @@ class TestSqrtOpError(unittest.TestCase): in1 = 1 self.assertRaises(TypeError, paddle.sqrt, in1) # The input dtype of sqrt op must be float16, float32, float64. 
- in2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + in2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) self.assertRaises(TypeError, paddle.sqrt, in2) - in3 = fluid.layers.data( - name='input3', shape=[12, 10], dtype="float16" + in3 = paddle.static.data( + name='input3', shape=[-1, 12, 10], dtype="float16" ) paddle.sqrt(x=in3) @@ -167,8 +167,8 @@ class TestExpm1API(unittest.TestCase): class TestParameter: def test_out_name(self): with fluid.program_guard(fluid.Program()): - np_x = np.array([0.1]) - data = fluid.layers.data(name="X", shape=[1]) + np_x = np.array([0.1]).astype('float32').reshape((-1, 1)) + data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32") out = eval("paddle.%s(data, name='Y')" % self.op_type) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -520,8 +520,8 @@ class TestAtan(TestActivation, TestParameter): def test_out_name(self): with fluid.program_guard(fluid.Program()): - np_x = np.array([0.1]) - data = fluid.layers.data(name="X", shape=[1]) + np_x = np.array([0.1]).astype('float32').reshape((-1, 1)) + data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32") out = paddle.atan(data, name='Y') place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -582,10 +582,9 @@ class TestSinhAPI(unittest.TestCase): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) - data_x = fluid.layers.data( + data_x = paddle.static.data( name="data_x", shape=test_data_shape, - append_batch_size=False, dtype="float32", ) @@ -667,10 +666,9 @@ class TestCoshAPI(unittest.TestCase): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) - data_x = fluid.layers.data( + data_x = paddle.static.data( name="data_x", shape=test_data_shape, - append_batch_size=False, dtype="float32", ) @@ -2399,12 +2397,8 @@ class TestLog(TestActivation): self.check_grad(['X'], 'Out', check_eager=True) def test_error(self): - in1 = fluid.layers.data( - name="in1", shape=[11, 17], append_batch_size=False, dtype="int32" - ) - in2 = fluid.layers.data( - name="in2", shape=[11, 17], append_batch_size=False, dtype="int64" - ) + in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32") + in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64") self.assertRaises(TypeError, paddle.log, in1) self.assertRaises(TypeError, paddle.log, in2) @@ -2569,10 +2563,9 @@ class TestLog1pAPI(unittest.TestCase): def test_api(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") - data_x = fluid.layers.data( + data_x = paddle.static.data( name="data_x", shape=[11, 17], - append_batch_size=False, dtype="float64", ) @@ -2718,12 +2711,8 @@ class TestPow_factor_tensor(TestActivation): def test_api(self): input = np.random.uniform(1, 2, [11, 17]).astype("float32") - x = fluid.layers.data( - name="x", shape=[11, 17], append_batch_size=False, dtype="float32" - ) - res = fluid.layers.data( - name="res", shape=[11, 17], append_batch_size=False, dtype="float32" - ) + x = paddle.static.data(name="x", shape=[11, 17], dtype="float32") + res = paddle.static.data(name="res", shape=[11, 17], dtype="float32") factor_1 = 2.0 factor_2 = fluid.layers.fill_constant([1], "float32", 3.0) diff --git a/python/paddle/fluid/tests/unittests/test_adadelta_op.py b/python/paddle/fluid/tests/unittests/test_adadelta_op.py index 95a485ce3a..51435ccb95 100644 --- a/python/paddle/fluid/tests/unittests/test_adadelta_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_adadelta_op.py @@ -143,8 +143,8 @@ class TestAdadeltaV2(unittest.TestCase): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_addmm_op.py b/python/paddle/fluid/tests/unittests/test_addmm_op.py index 9a1385c63b..2e4a9515b6 100644 --- a/python/paddle/fluid/tests/unittests/test_addmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_addmm_op.py @@ -76,109 +76,86 @@ class TestAddMMOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.addmm, input, x1, x2) # The input dtype of mul_op must be float32 or float64. - input = fluid.layers.data( + input = paddle.static.data( name='input', shape=[4, 4], dtype="int32", - append_batch_size=False, - ) - x3 = fluid.layers.data( - name='x3', shape=[4, 4], dtype="int32", append_batch_size=False - ) - x4 = fluid.layers.data( - name='x4', shape=[4, 4], dtype="int32", append_batch_size=False ) + x3 = paddle.static.data(name='x3', shape=[4, 4], dtype="int32") + x4 = paddle.static.data(name='x4', shape=[4, 4], dtype="int32") self.assertRaises(TypeError, paddle.addmm, input, x3, x4) # x and y dimension mismatch - x5 = fluid.layers.data( + x5 = paddle.static.data( name='x5', shape=[4, 5], dtype="float32", - append_batch_size=False, ) - x6 = fluid.layers.data( + x6 = paddle.static.data( name='x6', shape=[4, 4], dtype="float32", - append_batch_size=False, ) self.assertRaises(ValueError, paddle.addmm, input, x5, x6) # input and x are not broadcastable - x7 = fluid.layers.data( + x7 = paddle.static.data( name='x7', shape=[4, 4], dtype="float32", - append_batch_size=False, ) - x8 = fluid.layers.data( + x8 = paddle.static.data( name='x8', shape=[4, 4], dtype="float32", - append_batch_size=False, ) - input1 = fluid.layers.data( + input1 = paddle.static.data( name='input1', shape=[2, 4], dtype="float32", - append_batch_size=False, ) self.assertRaises(ValueError, paddle.addmm, input1, x7, x8) # input and x are not broadcastable - x9 = fluid.layers.data( + x9 = paddle.static.data( name='x9', shape=[4, 4], dtype="float32", - append_batch_size=False, ) - x10 = fluid.layers.data( + x10 = paddle.static.data( name='x10', shape=[4, 4], dtype="float32", - append_batch_size=False, ) - input2 = fluid.layers.data( + input2 = paddle.static.data( name='input2', shape=[1, 2], dtype="float32", - append_batch_size=False, ) self.assertRaises(ValueError, paddle.addmm, input2, x9, x10) - x11 = fluid.layers.data( + x11 = paddle.static.data( name='x11', shape=[4, 4], dtype="float32", - append_batch_size=False, - ) - x12 = fluid.layers.data( - name='x12', - shape=[4, 4], - dtype="float32", - append_batch_size=False, ) - input3 = fluid.layers.data( + x12 = paddle.static.data(name='x12', shape=[4, 4], dtype="float32") + input3 = paddle.static.data( name='input3', shape=[4, 2], dtype="float32", - append_batch_size=False, ) self.assertRaises(ValueError, paddle.addmm, input3, x11, x12) - x13 = fluid.layers.data( + x13 = paddle.static.data( name='x13', shape=[4, 4], dtype="float32", - append_batch_size=False, ) - x14 = fluid.layers.data( + x14 = paddle.static.data( name='x14', shape=[4, 4], 
dtype="float32", - append_batch_size=False, ) - input4 = fluid.layers.data( + input4 = paddle.static.data( name='input4', shape=[3, 1], dtype="float32", - append_batch_size=False, ) self.assertRaises(ValueError, paddle.addmm, input4, x13, x14) diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index 17614692f6..e3f90d7fd2 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -84,13 +84,17 @@ class TestArgsortOpCPU(unittest.TestCase): ) with fluid.program_guard(self.main_program, self.startup_program): - x = fluid.layers.data( - name="x", shape=self.input_shape, dtype=self.dtype + x = paddle.static.data( + name="x", shape=[-1] + list(self.input_shape), dtype=self.dtype ) x.stop_gradient = False - label = fluid.layers.data( - name="label", shape=self.input_shape, dtype=self.dtype + x.desc.set_need_check_feed(False) + label = paddle.static.data( + name="label", + shape=[-1] + list(self.input_shape), + dtype=self.dtype, ) + label.desc.set_need_check_feed(False) self.index = paddle.argsort( x=x, axis=self.axis, descending=self.descending ) diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index 3e4543cd1c..497dc31477 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -62,9 +62,9 @@ class TestArrayReadWrite(unittest.TestCase): def test_read_write(self): paddle.enable_static() x = [ - layers.data(name='x0', shape=[100]), - layers.data(name='x1', shape=[100]), - layers.data(name='x2', shape=[100]), + paddle.static.data(name='x0', shape=[-1, 100]), + paddle.static.data(name='x1', shape=[-1, 100]), + paddle.static.data(name='x2', shape=[-1, 100]), ] for each_x in x: each_x.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py index 02f649b39b..8017840c50 100644 --- a/python/paddle/fluid/tests/unittests/test_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_assign_op.py @@ -22,7 +22,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard from paddle.fluid.backward import append_backward @@ -261,7 +260,7 @@ class TestAssignDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [3, 4, 5], False, dtype) + data = paddle.static.data('data', [3, 4, 5], dtype) data.persistable = True out = paddle.fluid.layers.assign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -292,7 +291,7 @@ class TestAssignTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [3, 4, 5], False, dtype) + data = paddle.static.data('data', [3, 4, 5], dtype) data.persistable = True out = paddle.fluid.layers.assign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index fb6dc442a7..3c6594e659 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -26,8 
+26,10 @@ BATCH_SIZE = 64 def convolutional_neural_network(use_py_reader): with fluid.unique_name.guard(): - img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data( + name='img', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') py_reader = None if use_py_reader: diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py index 946b8959d0..25e4ab9aa8 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py @@ -38,7 +38,7 @@ from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer from paddle.distributed.auto_parallel.partitioner import Partitioner from paddle.distributed.auto_parallel.reshard import Resharder from paddle.distributed.fleet import auto -from paddle.fluid import core, layers +from paddle.fluid import core from paddle.fluid.initializer import NumpyArrayInitializer if os.getenv("CUDA_VISIBLE_DEVICES") is not None: @@ -588,7 +588,9 @@ class TestAutoParallelMapper(unittest.TestCase): root_id = 0 nranks = 2 with fluid.program_guard(train_program, startup_program): - input = layers.data(name="input", shape=[10, 10], dtype='float32') + input = paddle.static.data( + name="input", shape=[-1, 10, 10], dtype='float32' + ) output = train_program.current_block().create_var( name="outofbroadcast", dtype='float32', diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 6802a8a9ea..c2a6c468e5 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -763,7 +763,9 @@ class TestBatchNormOpError(unittest.TestCase): # the input dtype of batch_norm must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" + ) self.assertRaises(TypeError, paddle.static.nn.batch_norm, x2) @@ -779,7 +781,9 @@ class TestDygraphBatchNormAPIError(unittest.TestCase): # the input dtype of BatchNorm must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" + ) self.assertRaises(TypeError, batch_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/test_bmm_op.py b/python/paddle/fluid/tests/unittests/test_bmm_op.py index 5269f27ccd..aaa0e1f97f 100644 --- a/python/paddle/fluid/tests/unittests/test_bmm_op.py +++ b/python/paddle/fluid/tests/unittests/test_bmm_op.py @@ -41,10 +41,10 @@ class TestBmmOp(OpTest): class API_TestBmm(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data( + data1 = paddle.static.data( 'data1', shape=[-1, 3, 4], dtype='float64' ) - data2 = fluid.layers.data( + data2 = paddle.static.data( 'data2', shape=[-1, 4, 5], dtype='float64' ) result_bmm = paddle.bmm(data1, data2) diff --git a/python/paddle/fluid/tests/unittests/test_boxps.py b/python/paddle/fluid/tests/unittests/test_boxps.py index 4f2d3ee138..b6a1a845aa 100644 --- 
a/python/paddle/fluid/tests/unittests/test_boxps.py +++ b/python/paddle/fluid/tests/unittests/test_boxps.py @@ -96,11 +96,11 @@ class TestPullBoxSparseOP(unittest.TestCase): paddle.enable_static() program = fluid.Program() with fluid.program_guard(program): - x = fluid.layers.data( - name='x', shape=[1], dtype='int64', lod_level=0 + x = paddle.static.data( + name='x', shape=[-1, 1], dtype='int64', lod_level=0 ) - y = fluid.layers.data( - name='y', shape=[1], dtype='int64', lod_level=0 + y = paddle.static.data( + name='y', shape=[-1, 1], dtype='int64', lod_level=0 ) emb_x, emb_y = _pull_box_sparse([x, y], size=1) diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py index 8a1b6a52a2..6eec711c49 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py @@ -164,11 +164,11 @@ class TestBroadcastTensorsAPI(unittest.TestCase): def test_api(self): def test_static(): inputs = [ - paddle.fluid.layers.data( - shape=[4, 1, 4, 1], dtype='float32', name="x0" + paddle.static.data( + shape=[-1, 4, 1, 4, 1], dtype='float32', name="x0" ), - paddle.fluid.layers.data( - shape=[1, 4, 1, 4], dtype='float32', name="x1" + paddle.static.data( + shape=[-1, 1, 4, 1, 4], dtype='float32', name="x1" ), ] paddle.broadcast_tensors(inputs) @@ -196,33 +196,33 @@ class TestRaiseBroadcastTensorsError(unittest.TestCase): def test_errors(self): def test_type(): inputs = [ - paddle.fluid.layers.data( - shape=[1, 1, 1, 1], dtype='float32', name="x4" + paddle.static.data( + shape=[-1, 1, 1, 1, 1], dtype='float32', name="x4" ), - paddle.fluid.layers.data( - shape=[1, 4, 1, 1], dtype='float64', name="x5" + paddle.static.data( + shape=[-1, 1, 4, 1, 1], dtype='float64', name="x5" ), ] paddle.broadcast_tensors(inputs) def test_dtype(): inputs = [ - paddle.fluid.layers.data( - shape=[1, 1, 1, 1], dtype='int8', name="x6" + paddle.static.data( + shape=[-1, 1, 1, 1, 1], dtype='int8', name="x6" ), - paddle.fluid.layers.data( - shape=[1, 4, 1, 1], dtype='int8', name="x7" + paddle.static.data( + shape=[-1, 1, 4, 1, 1], dtype='int8', name="x7" ), ] paddle.broadcast_tensors(inputs) def test_bcast_semantics(): inputs = [ - paddle.fluid.layers.data( - shape=[1, 3, 1, 1], dtype='float32', name="x9" + paddle.static.data( + shape=[-1, 1, 3, 1, 1], dtype='float32', name="x9" ), - paddle.fluid.layers.data( - shape=[1, 8, 1, 1], dtype='float32', name="x10" + paddle.static.data( + shape=[-1, 1, 8, 1, 1], dtype='float32', name="x10" ), ] paddle.broadcast_tensors(inputs) diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py index ea8e7648b1..897c5b54b1 100644 --- a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py +++ b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py @@ -31,9 +31,9 @@ class TestBroadcastToError(unittest.TestCase): ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.broadcast_to, x1, shape) - x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.tensor.broadcast_to, x2, shape) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.broadcast_to, x3, shape) @@ -42,15 +42,12 @@ class 
TestBroadcastToError(unittest.TestCase): class TestBroadcastToAPI(unittest.TestCase): def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" - ) + x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data( + expand_shape = paddle.static.data( name="expand_shape", shape=[2], - append_batch_size=False, dtype="int32", ) diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index ea7f24a488..0e451c3e9f 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -22,7 +22,6 @@ from op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard @@ -142,7 +141,7 @@ class TestCastDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.cast(data, 'float64') data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -173,7 +172,7 @@ class TestCastTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.cast(data, 'float64') data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_communicator_async.py b/python/paddle/fluid/tests/unittests/test_communicator_async.py index ebc65cd5ac..322ec2f920 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_async.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_async.py @@ -27,8 +27,8 @@ import paddle.fluid as fluid class TestCommunicator(unittest.TestCase): def net(self): - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_communicator_geo.py b/python/paddle/fluid/tests/unittests/test_communicator_geo.py index 9019e9e9e3..0e836dca1c 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_geo.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_geo.py @@ -31,8 +31,10 @@ paddle.enable_static() class TestCommunicatorGeoEnd2End(unittest.TestCase): def net(self): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - x1 = fluid.layers.data(name='x1', shape=[1], dtype='int64', lod_level=1) + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + x1 = paddle.static.data( + name='x1', shape=[-1, 1], dtype='int64', lod_level=1 + ) emb = fluid.layers.embedding( input=x1, @@ -47,8 +49,7 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase): pool = fluid.layers.sequence_pool(input=emb, pool_type="sum") z = fluid.layers.concat(input=[x, pool], axis=1) y_predict = paddle.static.nn.fc(x=z, size=1) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - + y = 
paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) return avg_cost, x, x1, y diff --git a/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py b/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py index f0c1e9c885..b767d98281 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_ps_gpu.py @@ -51,8 +51,8 @@ class TestCommunicator(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker() fleet.init(role) - x = fluid.layers.data(name='x', shape=[1], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') slots_vars = [x, y] cost = paddle.nn.functional.square_error_cost(input=x, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index fa98771ce1..55cc9d933f 100755 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -41,9 +41,9 @@ def create_test_class(op_type, typename, callback): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[2], dtype='int32') - y = fluid.layers.data(name='y', shape=[2], dtype='int32') - a = fluid.layers.data(name='a', shape=[2], dtype='int16') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='int32') + y = paddle.static.data(name='y', shape=[-1, 2], dtype='int32') + a = paddle.static.data(name='a', shape=[-1, 2], dtype='int16') op = eval("paddle.%s" % self.op_type) self.assertRaises(TypeError, op, x=x, y=a) self.assertRaises(TypeError, op, x=a, y=y) @@ -415,8 +415,8 @@ def create_paddle_case(op_type, callback): def test_attr_name(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[4], dtype='int32') - y = fluid.layers.data(name='y', shape=[4], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 4], dtype='int32') + y = paddle.static.data(name='y', shape=[-1, 4], dtype='int32') op = eval("paddle.%s" % (self.op_type)) out = op(x=x, y=y, name="name_%s" % (self.op_type)) self.assertEqual("name_%s" % (self.op_type) in out.name, True) @@ -439,7 +439,7 @@ class TestCompareOpError(unittest.TestCase): paddle.enable_static() with program_guard(Program(), Program()): # The input x and y of compare_op must be Variable. 
- x = fluid.layers.data(name='x', shape=[1], dtype="float32") + x = paddle.static.data(name='x', shape=[-1, 1], dtype="float32") y = fluid.create_lod_tensor( numpy.array([[-1]]), [[1]], fluid.CPUPlace() ) diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 28c3feb010..a2ea57f835 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -102,10 +102,10 @@ class TestCompiledProgramError(unittest.TestCase): self.assertRaises(TypeError, fluid.CompiledProgram, "program") def build_simple_model(self): - img = fluid.layers.data( - name='image', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='image', shape=[-1, 1, 28, 28], dtype='float32' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') prediction = paddle.static.nn.fc(x=img, size=10, activation='softmax') loss = paddle.nn.functional.cross_entropy( input=prediction, label=label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index d20b3e92c4..7d7c2ddf70 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -20,7 +20,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import Program, core, program_guard from paddle.fluid.tests.unittests.op_test import ( OpTest, @@ -250,7 +249,7 @@ class TestConcatOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of concat_op should be list. - x1 = fluid.layers.data(shape=[4], dtype='int32', name='x1') + x1 = paddle.static.data(shape=[-1, 4], dtype='int32', name='x1') fluid.layers.concat(x1) # The item in input must be Variable. x2 = fluid.create_lod_tensor( @@ -261,12 +260,12 @@ class TestConcatOpError(unittest.TestCase): ) self.assertRaises(TypeError, fluid.layers.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. - x4 = fluid.layers.data(shape=[4], dtype='uint8', name='x4') - x5 = fluid.layers.data(shape=[4], dtype='uint8', name='x5') + x4 = paddle.static.data(shape=[-1, 4], dtype='uint8', name='x4') + x5 = paddle.static.data(shape=[-1, 4], dtype='uint8', name='x5') self.assertRaises(TypeError, fluid.layers.concat, [x4, x5]) - x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6') - x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7') - x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8') + x6 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x6') + x7 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x7') + x8 = paddle.static.data(shape=[-1, 4], dtype='float32', name='x8') fluid.layers.concat([x6, x7]) # The type of axis in concat_op should be int or Variable. @@ -369,9 +368,9 @@ class TestConcatAPI(unittest.TestCase): self.assertRaises(TypeError, fluid.layers.concat, [x4, x5]) # The type of axis in concat_op should be int or Variable. 
- x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6') - x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7') - x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8') + x6 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x6') + x7 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x7') + x8 = paddle.static.data(shape=[-1, 4], dtype='float32', name='x8') def test_axis_type(): paddle.concat([x6, x7], 3.2) @@ -457,9 +456,9 @@ class TestConcatDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data1 = layers.data('data1', [2, 3], False, dtype) + data1 = paddle.static.data('data1', [2, 3], dtype) data1.persistable = True - data2 = layers.data('data2', [2, 3], False, dtype) + data2 = paddle.static.data('data2', [2, 3], dtype) data2.persistable = True out = paddle.concat([data1, data2]) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) @@ -498,9 +497,9 @@ class TestConcatTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data1 = layers.data('data1', [2, 3, 4], False, dtype) + data1 = paddle.static.data('data1', [2, 3, 4], dtype) data1.persistable = True - data2 = layers.data('data2', [2, 3, 4], False, dtype) + data2 = paddle.static.data('data2', [2, 3, 4], dtype) data2.persistable = True out = paddle.concat([data1, data2], 1) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index 0aee7cadd7..0f8f6b32c9 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -30,7 +30,7 @@ class ConditionalBlockTest(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - data = layers.data(name='X', shape=[1], dtype='float32') + data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32') data.stop_gradient = False cond = ConditionalBlock(inputs=[data]) out = paddle.tensor.create_tensor(dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_api.py b/python/paddle/fluid/tests/unittests/test_conv2d_api.py index 16bf938d1c..0c458c1fec 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_api.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_api.py @@ -26,17 +26,15 @@ import paddle.fluid.core as core class TestConv2DAPI(unittest.TestCase): def test_api(self): - input_NHWC = fluid.layers.data( + input_NHWC = paddle.static.data( name="input_NHWC", shape=[2, 5, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCHW = fluid.layers.data( + input_NCHW = paddle.static.data( name="input_NCHW", shape=[2, 3, 5, 5], - append_batch_size=False, dtype="float32", ) @@ -120,10 +118,9 @@ class TestConv2DAPI(unittest.TestCase): class TestConv2DAPI_Error(unittest.TestCase): def test_api(self): - input = fluid.layers.data( + input = paddle.static.data( name="input", shape=[2, 5, 5, 5], - append_batch_size=False, dtype="float32", ) @@ -206,10 +203,9 @@ class TestConv2DAPI_Error(unittest.TestCase): self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[2, 5, 5, -1], - append_batch_size=False, dtype="float32", ) @@ -293,10 +289,9 @@ class TestConv2DAPI_Error(unittest.TestCase): self.assertRaises(ValueError, run_10) def test_api_with_error_input(self): - input = fluid.layers.data( + input = 
paddle.static.data( name="error_input", shape=[1], - append_batch_size=False, dtype="float32", ) @@ -325,9 +320,8 @@ class TestConv2DAPI_Error(unittest.TestCase): class TestConv2DEnviron(unittest.TestCase): def run1(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - inputs = fluid.layers.data( + inputs = paddle.static.data( shape=[2, 3, 5, 5], - append_batch_size=False, name="inputs", dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index ca7017a4ad..60c729b4f9 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -713,8 +713,8 @@ class TestConv2DOpError(unittest.TestCase): def test_dtype(): # the input dtype of conv2d must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data( - name='x2', shape=[3, 4, 5, 6], dtype="int32" + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) paddle.static.nn.conv2d(x2, 1, 1) diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index 2d4694be2e..afbce517f6 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -831,11 +831,11 @@ class TestCUDNNWithEvenUpsample_NHWC_FP16(TestCUDNN_FP16): class TestConv2DTransposeAPI(unittest.TestCase): def test_case1(self): - data1 = fluid.layers.data( - name='data1', shape=[3, 5, 5], dtype='float32' + data1 = paddle.static.data( + name='data1', shape=[-1, 3, 5, 5], dtype='float32' ) - data2 = fluid.layers.data( - name='data2', shape=[5, 5, 3], dtype='float32' + data2 = paddle.static.data( + name='data2', shape=[-1, 5, 5, 3], dtype='float32' ) out1 = paddle.static.nn.conv2d_transpose( input=data1, @@ -918,7 +918,9 @@ class TestConv2DTransposeAPI(unittest.TestCase): class TestConv2DTransposeOpException(unittest.TestCase): def test_exception(self): - data = fluid.layers.data(name='data', shape=[3, 5, 5], dtype="float32") + data = paddle.static.data( + name='data', shape=[-1, 3, 5, 5], dtype="float32" + ) def attr_data_format(): out = paddle.static.nn.conv2d_transpose( @@ -965,8 +967,8 @@ class TestConv2DTransposeOpException(unittest.TestCase): self.assertRaises(ValueError, attr_padding_with_data_format) - error_input = fluid.layers.data( - name='error_data', shape=[1], dtype="float32" + error_input = paddle.static.data( + name='error_data', shape=[-1, 1], dtype="float32" ) def error_input_size(): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index dc2760307c..4930fd2567 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -18,7 +18,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core @@ -865,17 +864,15 @@ create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding) class TestConv3DAPI(unittest.TestCase): def test_api(self): - input_NDHWC = fluid.layers.data( + input_NDHWC = paddle.static.data( name="input_NDHWC", shape=[2, 5, 5, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCDHW = fluid.layers.data( + input_NCDHW = paddle.static.data( name="input_NCDHW", shape=[2, 3, 5, 5, 3], - append_batch_size=False, dtype="float32", ) @@ -948,10 +945,9 @@ 
class TestConv3DAPI(unittest.TestCase): class TestConv3DAPI_Error(unittest.TestCase): def test_api(self): - input = fluid.layers.data( + input = paddle.static.data( name="input", shape=[2, 5, 5, 5, 4], - append_batch_size=False, dtype="float32", ) @@ -1034,10 +1030,9 @@ class TestConv3DAPI_Error(unittest.TestCase): self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[2, 5, 5, 5, -1], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py index 0ad217f681..d47f341652 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_part2_op.py @@ -86,11 +86,11 @@ class TestWithDilation_NHWC(TestConv3DTransposeOp): class TestConv3DTransposeAPI(unittest.TestCase): def test_case1(self): - data1 = fluid.layers.data( - name='data1', shape=[3, 5, 5, 5], dtype='float32' + data1 = paddle.static.data( + name='data1', shape=[-1, 3, 5, 5, 5], dtype='float32' ) - data2 = fluid.layers.data( - name='data2', shape=[5, 5, 5, 3], dtype='float32' + data2 = paddle.static.data( + name='data2', shape=[-1, 5, 5, 5, 3], dtype='float32' ) out1 = paddle.static.nn.conv3d_transpose( @@ -174,8 +174,8 @@ class TestConv3DTransposeAPI(unittest.TestCase): class TestConv3DTransposeOpException(unittest.TestCase): def test_exception(self): - data = fluid.layers.data( - name='data', shape=[3, 5, 5, 5], dtype="float32" + data = paddle.static.data( + name='data', shape=[-1, 3, 5, 5, 5], dtype="float32" ) def attr_data_format(): diff --git a/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py b/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py index 2bb99a00be..3dc153e58a 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_conv_nn_grad.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers class TestConvDoubleGradCheck(unittest.TestCase): @@ -30,7 +29,7 @@ class TestConvDoubleGradCheck(unittest.TestCase): shape = [2, 4, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, groups=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -57,7 +56,7 @@ class TestConvDoubleGradCheckTest0(unittest.TestCase): shape = [2, 4, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -83,7 +82,7 @@ class TestConvDoubleGradCheckTest1(unittest.TestCase): shape = [2, 3, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, padding=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -109,7 +108,7 @@ class TestConv3DDoubleGradCheck(unittest.TestCase): shape = [2, 4, 3, 4, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else 
np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d(x, 2, 1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -136,7 +135,7 @@ class TestConv3DDoubleGradCheckTest1(unittest.TestCase): shape = [2, 4, 5, 3, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d(x, 2, 1, padding=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) @@ -162,7 +161,7 @@ class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, num_filters=2, @@ -195,7 +194,7 @@ class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, num_filters=2, @@ -228,7 +227,7 @@ class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, num_filters=2, @@ -261,7 +260,7 @@ class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, num_filters=2, @@ -296,7 +295,7 @@ class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): shape = [2, 2, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, num_filters=2, @@ -331,7 +330,7 @@ class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase): shape = [2, 2, 2, 2, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, num_filters=2, @@ -364,7 +363,7 @@ class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase): shape = [2, 2, 2, 2, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, num_filters=2, @@ -398,7 +397,7 @@ class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase): shape = [2, 2, 3, 3, 2] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, num_filters=2, @@ -431,7 +430,7 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase): shape = [2, 2, 2, 2, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = 
paddle.static.nn.conv3d( input=x, num_filters=2, @@ -466,7 +465,7 @@ class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): shape = [2, 2, 2, 2, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, num_filters=2, @@ -501,7 +500,7 @@ class TestDepthWiseConvDoubleGradCheck(unittest.TestCase): shape = [2, 4, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) # condition of depthwise conv: # use_cudnn == False @@ -538,8 +537,8 @@ class TestDepthWiseConvDoubleGradCheckCase1(unittest.TestCase): w_shape = [4, 1, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', x_shape, False, dtype) - w = layers.data('w', w_shape, False, dtype) + x = paddle.static.data('x', x_shape, dtype) + w = paddle.static.data('w', w_shape, dtype) # condition of depthwise conv: # use_cudnn == False @@ -579,8 +578,8 @@ class TestConv3DDoubleGradCheck_NN(unittest.TestCase): w_shape = [6, 3, 3, 3, 3] eps = 0.005 dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 - x = layers.data('x', x_shape, False, dtype) - w = layers.data('w', w_shape, False, dtype) + x = paddle.static.data('x', x_shape, dtype) + w = paddle.static.data('w', w_shape, dtype) x.persistable = True w.persistable = True y = paddle.nn.functional.conv3d(x, w) diff --git a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py index 142359286b..34838dcd32 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_conv_transpose_nn_grad.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers class TestConvTransposeDoubleGradCheck(unittest.TestCase): @@ -35,7 +34,7 @@ class TestConvTransposeDoubleGradCheck(unittest.TestCase): dtype = np.float64 if core.is_compiled_with_rocm(): dtype = np.float32 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d_transpose( x, 2, filter_size=1, groups=1, bias_attr=False ) @@ -91,7 +90,7 @@ class TestConvTranspose2DoubleGradCheck_AsyPadding( dtype = np.float64 if core.is_compiled_with_rocm(): dtype = np.float32 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d_transpose( input=x, num_filters=2, @@ -144,7 +143,7 @@ class TestConvTranspose2DoubleGradCheck_PaddingSAME( dtype = np.float64 if core.is_compiled_with_rocm(): dtype = np.float32 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d_transpose( input=x, num_filters=2, @@ -197,7 +196,7 @@ class TestConvTranspose2DoubleGradCheck_PaddingVALID( dtype = np.float64 if core.is_compiled_with_rocm(): dtype = np.float32 - x = layers.data('x', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d_transpose( input=x, num_filters=2, @@ -250,7 +249,7 @@ class TestConvTranspose2DoubleGradCheck_ChannelLast( dtype = np.float64 if core.is_compiled_with_rocm(): dtype = np.float32 - x = layers.data('x', shape, False, dtype) 
+ x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d_transpose( input=x, num_filters=2, diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index a1f650dc63..ec5e5dbaa8 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -429,11 +429,11 @@ class TestCrossEntropyOpError(unittest.TestCase): def test_dtype(): # the input dtype of cross_entropy must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data( - name='x2', shape=[3, 4, 5, 6], dtype="int32" + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) - lab2 = fluid.layers.data( - name='lab2', shape=[3, 4, 5, 6], dtype="int32" + lab2 = paddle.static.data( + name='lab2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) paddle.nn.functional.cross_entropy( x2, lab2, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_cross_op.py b/python/paddle/fluid/tests/unittests/test_cross_op.py index 6cc366b85c..29bdf93cf1 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_op.py @@ -69,18 +69,18 @@ class TestCrossAPI(unittest.TestCase): def input_data(self): self.data_x = np.array( [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]] - ) + ).astype('float32') self.data_y = np.array( [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]] - ) + ).astype('float32') def test_cross_api(self): self.input_data() # case 1: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 3]) - y = fluid.layers.data(name='y', shape=[-1, 3]) + x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32") z = paddle.cross(x, y, axis=1) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -95,8 +95,8 @@ class TestCrossAPI(unittest.TestCase): # case 2: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 3]) - y = fluid.layers.data(name='y', shape=[-1, 3]) + x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32") + y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32") z = paddle.cross(x, y) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_data.py b/python/paddle/fluid/tests/unittests/test_data.py index 25b2372e81..0e2223767d 100644 --- a/python/paddle/fluid/tests/unittests/test_data.py +++ b/python/paddle/fluid/tests/unittests/test_data.py @@ -17,7 +17,6 @@ import unittest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard @@ -37,21 +36,6 @@ class TestApiDataError(unittest.TestCase): self.assertRaises(TypeError, test_shape_type) - def test_layers_data(self): - with program_guard(Program(), Program()): - - # 1. The type of 'name' in layers.data must be str. - def test_name_type(): - layers.data(name=1, shape=[2, 25], dtype="bool") - - self.assertRaises(TypeError, test_name_type) - - # 2. The type of 'shape' in layers.data must be list or tuple. 
- def test_shape_type(): - layers.data(name='data1', shape=2, dtype="bool") - - self.assertRaises(TypeError, test_shape_type) - class TestApiStaticDataError(unittest.TestCase): def test_fluid_dtype(self): @@ -81,16 +65,15 @@ class TestApiStaticDataError(unittest.TestCase): self.assertRaises(TypeError, test_shape_type) - def test_layers_data(self): with program_guard(Program(), Program()): - # 1. The type of 'name' in layers.data must be str. + # 1. The type of 'name' in paddle.static.data must be str. def test_name_type(): paddle.static.data(name=1, shape=[2, 25], dtype="bool") self.assertRaises(TypeError, test_name_type) - # 2. The type of 'shape' in layers.data must be list or tuple. + # 2. The type of 'shape' in paddle.static.data must be list or tuple. def test_shape_type(): paddle.static.data(name='data1', shape=2, dtype="bool") @@ -102,9 +85,6 @@ class TestApiErrorWithDynamicMode(unittest.TestCase): with program_guard(Program(), Program()): paddle.disable_static() self.assertRaises(AssertionError, fluid.data, 'a', [2, 25]) - self.assertRaises( - AssertionError, fluid.layers.data, 'b', shape=[2, 25] - ) self.assertRaises( AssertionError, paddle.static.data, 'c', shape=[2, 25] ) diff --git a/python/paddle/fluid/tests/unittests/test_data_norm_op.py b/python/paddle/fluid/tests/unittests/test_data_norm_op.py index 2b84f2b568..37fd990273 100644 --- a/python/paddle/fluid/tests/unittests/test_data_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_data_norm_op.py @@ -19,7 +19,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard from paddle.fluid.op import Operator @@ -518,7 +517,7 @@ class TestDataNormOpWithSlotDim(OpTest): class TestDataNormOpErrorr(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x2 = fluid.layers.data(name='x2', shape=[3, 4], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 3, 4], dtype="int32") # self.assertRaises(TypeError, fluid.data_norm, x2) paddle.static.nn.data_norm( input=x2, param_attr={}, enable_scale_and_shift=True diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index fb8c9ff6e5..f98193ea64 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -100,8 +100,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -192,8 +192,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -257,8 +257,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -350,13 +350,13 @@ class TestDataset(unittest.TestCase): startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): 
for slot in slots[:2]: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) for slot in slots[2:]: - var = fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) @@ -425,17 +425,17 @@ class TestDataset(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - var1 = fluid.layers.data( - name="slot1", shape=[1], dtype="int64", lod_level=0 + var1 = paddle.static.data( + name="slot1", shape=[-1, 1], dtype="int64", lod_level=0 ) - var2 = fluid.layers.data( - name="slot2", shape=[1], dtype="int64", lod_level=0 + var2 = paddle.static.data( + name="slot2", shape=[-1, 1], dtype="int64", lod_level=0 ) - var3 = fluid.layers.data( - name="slot3", shape=[1], dtype="float32", lod_level=0 + var3 = paddle.static.data( + name="slot3", shape=[-1, 1], dtype="float32", lod_level=0 ) - var4 = fluid.layers.data( - name="slot4", shape=[1], dtype="float32", lod_level=0 + var4 = paddle.static.data( + name="slot4", shape=[-1, 1], dtype="float32", lod_level=0 ) slots_vars = [var1, var2, var3, var4] @@ -498,8 +498,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) @@ -614,8 +614,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -682,8 +682,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) @@ -807,8 +807,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -872,8 +872,8 @@ class TestDataset(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) @@ -944,8 +944,8 @@ class TestDatasetWithFetchHandler(unittest.TestCase): slots_vars = [] poolings = [] for slot in slots: - data = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) var = fluid.layers.cast(x=data, dtype='float32') pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE') @@ -1117,8 +1117,8 @@ class TestDataset2(unittest.TestCase): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = 
fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) @@ -1187,8 +1187,8 @@ class TestDataset2(unittest.TestCase): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) @@ -1318,8 +1318,8 @@ class TestDataset2(unittest.TestCase): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="float32", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="float32", lod_level=1 ) slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) diff --git a/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py b/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py index 63814b4682..1a8d4de560 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_consistency_inspection.py @@ -394,12 +394,11 @@ class TestDataset(unittest.TestCase): f.write(data) slot_data = [] - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) slot_data.append(label) @@ -407,56 +406,65 @@ class TestDataset(unittest.TestCase): len_sparse_query = 19 for feat_name in range(1, len_sparse_query + 1): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + paddle.static.data( + name=str(feat_name), + shape=[-1, 1], + dtype='int64', + lod_level=1, ) ) # sparse_url_feat_names for feat_name in range(len_sparse_query + 1, len_sparse_query + 5): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + paddle.static.data( + name=str(feat_name), + shape=[-1, 1], + dtype='int64', + lod_level=1, ) ) # dense_feat_names for feat_name in range(len_sparse_query + 5, len_sparse_query + 16): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='float32' + paddle.static.data( + name=str(feat_name), shape=[-1, 1], dtype='float32' ) ) # context_feat_namess for feat_name in range(len_sparse_query + 16, len_sparse_query + 18): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='float32' + paddle.static.data( + name=str(feat_name), shape=[-1, 1], dtype='float32' ) ) # neg sparse_url_feat_names for feat_name in range(len_sparse_query + 18, len_sparse_query + 22): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='int64', lod_level=1 + paddle.static.data( + name=str(feat_name), + shape=[-1, 1], + dtype='int64', + lod_level=1, ) ) # neg dense_feat_names for feat_name in range(len_sparse_query + 22, len_sparse_query + 33): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='float32' + paddle.static.data( + name=str(feat_name), shape=[-1, 1], dtype='float32' ) ) # neg context_feat_namess for feat_name in range(len_sparse_query + 33, len_sparse_query + 35): slot_data.append( - fluid.layers.data( - name=str(feat_name), shape=[1], dtype='float32' + 
paddle.static.data( + name=str(feat_name), shape=[-1, 1], dtype='float32' ) ) diff --git a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py index 37443f6aa5..078ffb4e86 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py @@ -86,11 +86,11 @@ class DatasetLoaderTestBase(unittest.TestCase): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data( - name='image', shape=IMAGE_SHAPE, dtype='float32' + image = paddle.static.data( + name='image', shape=[-1] + IMAGE_SHAPE, dtype='float32' ) - label = fluid.layers.data( - name='label', shape=LABEL_SHAPE, dtype='int64' + label = paddle.static.data( + name='label', shape=[-1] + LABEL_SHAPE, dtype='int64' ) simple_fc_net_with_inputs(image, label) diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index 6f417973ee..e02282cb9b 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -42,10 +42,12 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data( - name='image', shape=[784], dtype='float32' + image = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') py_reader = fluid.io.PyReader( feed_list=[image, label], capacity=4, diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py index 1a7118cfb8..53ef9b02cc 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py @@ -50,11 +50,11 @@ class TestClass(unittest.TestCase): main_prog = fluid.Program() startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - img = fluid.layers.data( - shape=img_shape, dtype='float32', name='image' + img = paddle.static.data( + shape=[-1] + img_shape, dtype='float32', name='image' ) - label = fluid.layers.data( - shape=label_shape, dtype='int64', name='label' + label = paddle.static.data( + shape=[-1] + label_shape, dtype='int64', name='label' ) feeder = fluid.DataFeeder(feed_list=[img, label], place=p) diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py index ed1f9a9aaf..ecb49c3172 100644 --- a/python/paddle/fluid/tests/unittests/test_desc_clone.py +++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py @@ -68,8 +68,10 @@ def cnn_model(data): def get_model(batch_size): # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + images = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE + ) + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') # Train program predict = cnn_model(images) @@ -186,7 +188,7 @@ class TestCloneWithStopGradient(unittest.TestCase): train_program = fluid.Program() startup_program = 
fluid.Program() with fluid.program_guard(train_program, startup_program): - img = fluid.layers.data(name='image', shape=[784]) + img = paddle.static.data(name='image', shape=[-1, 784]) hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') hidden1.stop_gradient = True hidden2 = paddle.nn.functional.dropout(hidden1, p=0.5) @@ -194,7 +196,9 @@ class TestCloneWithStopGradient(unittest.TestCase): input=paddle.static.nn.fc( hidden2, size=10, activation='softmax' ), - label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + label=paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ), reduction='none', use_softmax=False, ) @@ -214,7 +218,7 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - img = fluid.layers.data(name='image', shape=[784]) + img = paddle.static.data(name='image', shape=[-1, 784]) true = paddle.ones(shape=[1], dtype="float32") hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') hidden1.stop_gradient = True @@ -236,7 +240,9 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): input=paddle.static.nn.fc( hidden2, size=10, activation='softmax' ), - label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + label=paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ), reduction='none', use_softmax=False, ) @@ -259,7 +265,7 @@ class TestCloneWithRaise(unittest.TestCase): train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): - img = fluid.layers.data(name='image', shape=[784]) + img = paddle.static.data(name='image', shape=[-1, 784]) true = paddle.ones(shape=[1], dtype="float32") hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') hidden1.stop_gradient = True @@ -280,7 +286,9 @@ class TestCloneWithRaise(unittest.TestCase): input=paddle.static.nn.fc( hidden2, size=10, activation='softmax' ), - label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + label=paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ), reduction='none', use_softmax=False, ) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py index c80e1a68fd..cb58970e88 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py @@ -45,8 +45,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fleet.init(role_maker.PaddleCloudRoleMaker()) - x = paddle.fluid.layers.data(name='x', shape=[1], dtype='float32') - y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = paddle.mean(cost) @@ -83,8 +83,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fleet.init(role_maker.PaddleCloudRoleMaker()) - x = paddle.fluid.layers.data(name='x', shape=[1], dtype='float32') - y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = 
paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py index 0803193041..d0521a5918 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py @@ -44,10 +44,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py index 725c2559db..c560dfa8db 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py @@ -46,12 +46,11 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data( + input_x = paddle.static.data( name="x", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) x_embedding = paddle.fluid.layers.embedding( is_distributed=False, @@ -63,7 +62,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): ), is_sparse=True, ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=x_embedding, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py index c25e60793f..9eac239742 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py @@ -46,8 +46,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data(name="x", shape=[1], dtype='int64') - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 1], dtype='int64') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') emb = paddle.fluid.layers.embedding( input=input_x, size=[100, 10], is_sparse=True diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py index 3832fd5de2..c8470f64eb 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py @@ -43,10 +43,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): 
paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') @@ -76,10 +74,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): paddle.fluid.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py index fdaa0a69c8..f1ff439112 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py @@ -38,8 +38,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): def test_gradient_merge_optimizer(self): fleet.init(role_maker.PaddleCloudRoleMaker()) - x = paddle.fluid.layers.data(name='x', shape=[1], dtype='float32') - y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py index 376a0d087e..bc17b0d67f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py @@ -65,18 +65,18 @@ class TestDistFleetHeterProgram(unittest.TestCase): return self.strategy def build_input(self): - dense_input = fluid.layers.data( - name="dense_input", shape=[10], dtype="float32" + dense_input = paddle.static.data( + name="dense_input", shape=[-1, 10], dtype="float32" ) sparse_input_ids = [ - fluid.layers.data( - name="C" + str(i), shape=[1], lod_level=1, dtype="int64" + paddle.static.data( + name="C" + str(i), shape=[-1, 1], lod_level=1, dtype="int64" ) for i in range(1, 27) ] - label = fluid.layers.data(name="label", shape=[1], dtype="float32") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32") inputs = [dense_input] + sparse_input_ids + [label] return inputs diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py index 472f8a6ced..b60ff0db63 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_minimize.py @@ -69,7 +69,9 @@ class TestPSMinimize(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="1", 
shape=[1], dtype="int64", lod_level=1) + q = paddle.static.data( + name="1", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -95,9 +97,11 @@ class TestPSMinimize(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data(name="2", shape=[1], dtype="int64", lod_level=1) + pt = paddle.static.data( + name="2", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -124,7 +128,9 @@ class TestPSMinimize(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) + nt = paddle.static.data( + name="3", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py index c879875f6f..a330b45b52 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py @@ -69,8 +69,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.layers.embedding( @@ -99,10 +99,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.layers.embedding( @@ -132,8 +132,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.layers.embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py index 668b64d193..2143dc94d3 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py @@ -69,7 +69,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="1", shape=[1], dtype="int64", lod_level=1) + q = paddle.static.data( + name="1", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -95,9 +97,11 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data(name="2", shape=[1], dtype="int64", lod_level=1) + pt = paddle.static.data( + name="2", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -124,7 +128,9 @@ class TestPSPassWithBow(unittest.TestCase): 
bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) + nt = paddle.static.data( + name="3", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, @@ -204,8 +210,8 @@ class TestPSPassWithBow(unittest.TestCase): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: - var = fluid.layers.data( - name=slot, shape=[1], dtype="int64", lod_level=1 + var = paddle.static.data( + name=slot, shape=[-1, 1], dtype="int64", lod_level=1 ) slots_vars.append(var) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py index 5aa14fba6a..bee3cd9eb2 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py @@ -72,7 +72,9 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="1", shape=[1], dtype="int64", lod_level=1) + q = paddle.static.data( + name="1", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -98,9 +100,11 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data(name="2", shape=[1], dtype="int64", lod_level=1) + pt = paddle.static.data( + name="2", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -127,7 +131,9 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) + nt = paddle.static.data( + name="3", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py index 8ecb4e2a2a..58248d325b 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py @@ -73,8 +73,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( @@ -101,10 +101,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( @@ -132,8 +132,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py 
b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py index d8dfcda35e..e207fb859d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py @@ -72,8 +72,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( @@ -101,10 +101,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( @@ -132,8 +132,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py index c4517cc284..4093fc34cc 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py @@ -69,8 +69,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = False # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.layers.embedding( @@ -99,10 +99,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.layers.embedding( @@ -132,8 +132,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.layers.embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py index 93c2d48f89..025b3e90b3 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py @@ -69,8 +69,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( @@ -97,10 +97,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + 
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( @@ -128,8 +128,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py index ca69a778aa..51bf54b324 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py @@ -69,8 +69,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.layers.embedding( @@ -99,10 +99,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.layers.embedding( @@ -132,8 +132,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.layers.embedding( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py index ab6bb7198c..165a8b6240 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py @@ -69,8 +69,8 @@ class TestPSPassWithBow(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data( - name="query_ids", shape=[1], dtype="int64", lod_level=1 + q = paddle.static.data( + name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( @@ -97,10 +97,10 @@ class TestPSPassWithBow(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data( - name="pos_title_ids", shape=[1], dtype="int64", lod_level=1 + pt = paddle.static.data( + name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( @@ -128,8 +128,8 @@ class TestPSPassWithBow(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data( - name="neg_title_ids", shape=[1], dtype="int64", lod_level=1 + nt = paddle.static.data( + name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1 ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( diff --git 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py index 57f4615f7c..517232fa54 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_sparse_embedding_ctr.py @@ -189,26 +189,23 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase): """ dnn_input_dim, lr_input_dim = 10, 10 - dnn_data = fluid.layers.data( + dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - lr_data = fluid.layers.data( + lr_data = paddle.static.data( name="lr_data", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=0, - append_batch_size=False, ) datas = [dnn_data, lr_data, label] diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py index 72068108d2..ba6e67a035 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_spmt.py @@ -67,7 +67,9 @@ class TestSPMT(unittest.TestCase): is_sparse = True # query - q = fluid.layers.data(name="1", shape=[1], dtype="int64", lod_level=1) + q = paddle.static.data( + name="1", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding q_emb = fluid.contrib.layers.sparse_embedding( input=q, @@ -93,9 +95,11 @@ class TestSPMT(unittest.TestCase): ), ) # label data - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") # pt - pt = fluid.layers.data(name="2", shape=[1], dtype="int64", lod_level=1) + pt = paddle.static.data( + name="2", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding pt_emb = fluid.contrib.layers.sparse_embedding( input=pt, @@ -122,7 +126,9 @@ class TestSPMT(unittest.TestCase): bias_attr=fluid.ParamAttr(name="__fc_b__"), ) # nt - nt = fluid.layers.data(name="3", shape=[1], dtype="int64", lod_level=1) + nt = paddle.static.data( + name="3", shape=[-1, 1], dtype="int64", lod_level=1 + ) # embedding nt_emb = fluid.contrib.layers.sparse_embedding( input=nt, diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py index b13e2b8171..e954b56d7f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_trainer_desc_config.py @@ -39,8 +39,8 @@ class TestDistStrategyTrainerDescConfig(unittest.TestCase): fleet.init(role_maker.PaddleCloudRoleMaker()) - x = paddle.fluid.layers.data(name='x', shape=[1], dtype='float32') - y = paddle.fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=x, label=y) avg_cost = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py index 0749139be8..08d9c52c68 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist_fleetapi.py @@ -55,7 +55,7 @@ class 
FleetCollectiveTest(unittest.TestCase): # Operator "gen_nccl_id" has not been registered return - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) diff --git a/python/paddle/fluid/tests/unittests/test_dist_train.py b/python/paddle/fluid/tests/unittests/test_dist_train.py index 5fa4f87f95..828b07baf7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_train.py @@ -23,7 +23,6 @@ from dist_test_utils import remove_ps_flag import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers import paddle.fluid.layers.ops as ops from paddle.fluid import core from paddle.fluid.layers.io import ListenAndServ, Recv, Send @@ -83,11 +82,10 @@ class TestSendOp(unittest.TestCase): dtype="float32", shape=[32, 32], ) - x = layers.data( + x = paddle.static.data( shape=[32, 32], dtype='float32', name="X", - append_batch_size=False, ) fluid.initializer.Constant(value=1.0)(x, main.global_block()) ops._scale(x=x, scale=10.0, out=out_var) @@ -108,12 +106,7 @@ class TestSendOp(unittest.TestCase): }, ) - x = layers.data( - shape=[32, 32], - dtype='float32', - name='X', - append_batch_size=False, - ) + x = paddle.static.data(shape=[32, 32], dtype='float32', name='X') x.persistable = True fluid.initializer.Constant(value=2.3)(x, main.global_block()) @@ -141,12 +134,7 @@ class TestSendOp(unittest.TestCase): def run_local(self, place): main = fluid.Program() with fluid.program_guard(main): - x = layers.data( - shape=[32, 32], - dtype='float32', - name='X', - append_batch_size=False, - ) + x = paddle.static.data(shape=[32, 32], dtype='float32', name='X') fluid.initializer.Constant(value=2.3)(x, main.global_block()) o = paddle.scale(x=x, scale=10.0) exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index 71fb3f96d4..e9b8f773c7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -38,14 +38,14 @@ class TranspilerTest(unittest.TestCase): self.transpiler = None def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) @@ -292,14 +292,14 @@ class TestNoSliceVar(TranspilerTest): class TestLRDecay(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD( @@ -338,14 +338,14 @@ class 
TestFakeInit(TranspilerTest): def net_conf(self): dict_size, embedding_size, neg_num = 10000, 8, 5 - input_word = fluid.layers.data( - name="input_word", shape=[1], dtype='int64', lod_level=1 + input_word = paddle.static.data( + name="input_word", shape=[-1, 1], dtype='int64', lod_level=1 ) - true_word = fluid.layers.data( - name='true_label', shape=[1], dtype='int64', lod_level=1 + true_word = paddle.static.data( + name='true_label', shape=[-1, 1], dtype='int64', lod_level=1 ) - neg_word = fluid.layers.data( - name="neg_label", shape=[1], dtype='int64', lod_level=1 + neg_word = paddle.static.data( + name="neg_label", shape=[-1, 1], dtype='int64', lod_level=1 ) inputs = [input_word, true_word, neg_word] @@ -458,14 +458,14 @@ class TestFakeInit(TranspilerTest): class TestDecayedAdagrad(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) opt = fluid.optimizer.DecayedAdagrad(learning_rate=0.1) @@ -478,14 +478,14 @@ class TestDecayedAdagrad(TranspilerTest): class TestFtrl(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) opt = fluid.optimizer.Ftrl(learning_rate=0.1) @@ -498,14 +498,14 @@ class TestFtrl(TranspilerTest): class TestLRDecayConditional(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD( @@ -561,7 +561,7 @@ class TestLRDecayConditional(TranspilerTest): class TestL2Decay(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, @@ -570,7 +570,7 @@ class TestL2Decay(TranspilerTest): ), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) @@ -599,14 +599,14 @@ class TestL2Decay(TranspilerTest): class TestL2DecayWithPiecewise(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', 
shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) base_lr = 1.0 @@ -673,7 +673,7 @@ class TestL2DecayWithPiecewise(TranspilerTest): class TestEmptyPserverOptimizeBlocks(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') # only one parameter y_predict = paddle.static.nn.fc( x, @@ -681,7 +681,7 @@ class TestEmptyPserverOptimizeBlocks(TranspilerTest): weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=False, ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=1.0) @@ -715,14 +715,14 @@ class TestDistLookupTableBase(TranspilerTest): pool = fluid.layers.sequence_pool(input=emb, pool_type='average') return pool - title_ids = fluid.layers.data( - name='title_ids', shape=[1], dtype='int64', lod_level=1 + title_ids = paddle.static.data( + name='title_ids', shape=[-1, 1], dtype='int64', lod_level=1 ) - brand_ids = fluid.layers.data( - name='brand_ids', shape=[1], dtype='int64', lod_level=1 + brand_ids = paddle.static.data( + name='brand_ids', shape=[-1, 1], dtype='int64', lod_level=1 ) - profile_ids = fluid.layers.data( - name='brand_ids', shape=[1], dtype='int64', lod_level=1 + profile_ids = paddle.static.data( + name='profile_ids', shape=[-1, 1], dtype='int64', lod_level=1 ) title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed) brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed) @@ -737,7 +737,7 @@ class TestDistLookupTableBase(TranspilerTest): bias_attr=fluid.ParamAttr(name='fc_b'), ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') cost = paddle.nn.functional.cross_entropy( input=predict, label=label, reduction='none', use_softmax=False ) @@ -1116,14 +1116,14 @@ class TestDistArgsInProgram(TestDistLookupTableBase): class TestRMSPropOptimizer(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) @@ -1148,14 +1148,14 @@ class TestRMSPropOptimizer(TranspilerTest): class TestLoadSliceVar(TranspilerTest): def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32') y_predict = paddle.static.nn.fc( x, size=1000, weight_attr=fluid.ParamAttr(name='fc_w'), bias_attr=fluid.ParamAttr(name='fc_b'), ) - y =
fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) avg_cost = paddle.mean(cost) optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) @@ -1315,8 +1315,10 @@ class TestRemoteNce(TestDistLookupTableBase): sampler = "uniform" nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32') - input = fluid.layers.data(name="input", shape=[10], dtype="float32") - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + input = paddle.static.data( + name="input", shape=[-1, 10], dtype="float32" + ) + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") w_param = ( fluid.default_main_program() @@ -1388,13 +1390,13 @@ class TestRemoteHsigmoid(TestDistLookupTableBase): num_total_classes = 3 - input = fluid.layers.data(name="input", shape=[1], dtype="float32") - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - path_table = fluid.layers.data( - name='path_table', shape=[3], dtype='int64' + input = paddle.static.data(name="input", shape=[-1, 1], dtype="float32") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") + path_table = paddle.static.data( + name='path_table', shape=[-1, 3], dtype='int64' ) - path_code = fluid.layers.data( - name='path_code', shape=[3], dtype='int64' + path_code = paddle.static.data( + name='path_code', shape=[-1, 3], dtype='int64' ) w_param = ( fluid.default_main_program() diff --git a/python/paddle/fluid/tests/unittests/test_dist_tree_index.py b/python/paddle/fluid/tests/unittests/test_dist_tree_index.py index d5abb73473..b336de40cb 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_tree_index.py +++ b/python/paddle/fluid/tests/unittests/test_dist_tree_index.py @@ -17,7 +17,6 @@ import tempfile import unittest import paddle -import paddle.fluid as fluid from paddle.dataset.common import download from paddle.distributed.fleet.dataset import TreeIndex @@ -25,19 +24,19 @@ paddle.enable_static() def create_feeds(): - user_input = fluid.layers.data( - name="item_id", shape=[1], dtype="int64", lod_level=1 + user_input = paddle.static.data( + name="item_id", shape=[-1, 1], dtype="int64", lod_level=1 ) - item = fluid.layers.data( - name="unit_id", shape=[1], dtype="int64", lod_level=1 + item = paddle.static.data( + name="unit_id", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data( - name="label", shape=[1], dtype="int64", lod_level=1 + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64", lod_level=1 ) - labels = fluid.layers.data( - name="labels", shape=[1], dtype="int64", lod_level=1 + labels = paddle.static.data( + name="labels", shape=[-1, 1], dtype="int64", lod_level=1 ) feed_list = [user_input, item, label, labels] @@ -140,7 +139,7 @@ class TestIndexSampler(unittest.TestCase): slots = ["slot1", "slot2", "slot3"] slots_vars = [] for slot in slots: - var = fluid.layers.data(name=slot, shape=[1], dtype="int64") + var = paddle.static.data(name=slot, shape=[-1, 1], dtype="int64") slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() diff --git a/python/paddle/fluid/tests/unittests/test_dot_op.py b/python/paddle/fluid/tests/unittests/test_dot_op.py index 55460c2f14..d32057bfb0 100644 --- a/python/paddle/fluid/tests/unittests/test_dot_op.py +++ b/python/paddle/fluid/tests/unittests/test_dot_op.py @@ -116,16 +116,22 @@ class TestDotOpError(unittest.TestCase): # the input dtype of elementwise_mul must 
be float16 or float32 or float64 or int32 or int64 # float16 only can be set on GPU place - x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8") - y1 = fluid.layers.data(name='y1', shape=[120], dtype="uint8") + x1 = paddle.static.data(name='x1', shape=[-1, 120], dtype="uint8") + y1 = paddle.static.data(name='y1', shape=[-1, 120], dtype="uint8") self.assertRaises(Exception, paddle.dot, x1, y1) - x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="float32") - y2 = fluid.layers.data(name='y2', shape=[2, 3], dtype="float32") + x2 = paddle.static.data( + name='x2', shape=[-1, 2, 3], dtype="float32" + ) + y2 = paddle.static.data( + name='y2', shape=[-1, 2, 3], dtype="float32" + ) self.assertRaises(Exception, paddle.dot, x2, y2) - x3 = fluid.layers.data(name='x3', shape=[3], dtype="float32") - y3 = fluid.layers.data(name='y3', shape=[2, 3], dtype="float32") + x3 = paddle.static.data(name='x3', shape=[-1, 3], dtype="float32") + y3 = paddle.static.data( + name='y3', shape=[-1, 2, 3], dtype="float32" + ) - self.assertRaises(Exception, paddle.dot, x2, y3) + self.assertRaises(Exception, paddle.dot, x3, y3) diff --git a/python/paddle/fluid/tests/unittests/test_downpoursgd.py b/python/paddle/fluid/tests/unittests/test_downpoursgd.py index 29e022c4ff..652660b8e0 100644 --- a/python/paddle/fluid/tests/unittests/test_downpoursgd.py +++ b/python/paddle/fluid/tests/unittests/test_downpoursgd.py @@ -52,12 +52,12 @@ class TestListenAndServOp(unittest.TestCase): cache_path ) os.system(cmd) - x = fluid.layers.data(name='x', shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64') x_emb = fluid.layers.embedding( input=x, size=[1, 2], is_distributed=True ) y_predict = paddle.static.nn.fc(x=x_emb, size=1) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y ) @@ -116,12 +116,12 @@ class TestListenAndServOp(unittest.TestCase): cache_path ) os.system(cmd) - x = fluid.layers.data(name='x', shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64') x_emb = fluid.layers.embedding( input=x, size=[1, 2], is_distributed=True ) y_predict = paddle.static.nn.fc(x=x_emb, size=1) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y ) @@ -178,12 +178,12 @@ class TestListenAndServOp(unittest.TestCase): cache_path ) os.system(cmd) - x = fluid.layers.data(name='x', shape=[1], dtype='int64') + x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64') x_emb = fluid.layers.embedding( input=x, size=[1, 2], is_distributed=True ) y_predict = paddle.static.nn.fc(x=x_emb, size=1) - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y ) diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index f68b8b0561..9a48b877f5 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -349,8 +349,8 @@ class TestDropoutOpError(unittest.TestCase): def test_dtype(): # the input dtype of dropout must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 =
paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) paddle.nn.functional.dropout(x2, p=0.5) diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index e79773e8d4..8e07d427e4 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -164,10 +164,12 @@ class TestDygraphMultiForward(unittest.TestCase): paddle.dataset.mnist.train(), batch_size=128, drop_last=True ) - img = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) loss = paddle.nn.functional.cross_entropy( cost, label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py index 43bd952708..a12a17636b 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py @@ -32,8 +32,8 @@ fluid.core._set_eager_deletion_mode(0.0, 1.0, True) def simple_fc_net(): - image = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + image = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') hidden = image for _ in range(4): hidden = paddle.static.nn.fc( diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py index f9294f152d..44153b6e2f 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py @@ -41,11 +41,11 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): reader = fake_imdb_reader(word_dict_size, batch_size * 40) train_reader = paddle.batch(reader, batch_size=batch_size) - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") cost = network(data, label, word_dict_size) cost.persistable = True diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py index 80bc977f09..5657eb174c 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py @@ -333,30 +333,22 @@ def lm_model( return real_res, last_hidden, last_cell batch_size_each = batch_size - x = layers.data( - name="x", - shape=[batch_size_each, num_steps, 1], - dtype='int64', - append_batch_size=False, + x = paddle.static.data( + name="x", shape=[batch_size_each, num_steps, 1], dtype='int64' ) - y = layers.data( - name="y", - shape=[batch_size_each * num_steps, 1], - dtype='int64', - append_batch_size=False, + y = 
paddle.static.data( + name="y", shape=[batch_size_each * num_steps, 1], dtype='int64' ) - init_hidden = layers.data( + init_hidden = paddle.static.data( name="init_hidden", shape=[num_layers, batch_size_each, hidden_size], dtype='float32', - append_batch_size=False, ) - init_cell = layers.data( + init_cell = paddle.static.data( name="init_cell", shape=[num_layers, batch_size_each, hidden_size], dtype='float32', - append_batch_size=False, ) init_cell.persistable = True diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py index 10f5def724..bd4e088195 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_recurrent_op.py @@ -139,15 +139,14 @@ class EagerDeletionRecurrentOpTest1(unittest.TestCase): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype='float32', name='h_boot' + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype='float32', name='h_boot' ) h_boot.stop_gradient = False @@ -292,15 +291,14 @@ class EagerDeletionRecurrentOpTest2(EagerDeletionRecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype='float32', name='h_boot' + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype='float32', name='h_boot' ) h_boot.stop_gradient = False @@ -402,25 +400,22 @@ class EagerDeletionRecurrentOpMultipleMemoryTest(EagerDeletionRecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot1 = layers.data( + h_boot1 = paddle.static.data( shape=[self.batch_size, self.input_dim], dtype='float32', name='h_boot1', - append_batch_size=False, ) h_boot1.stop_gradient = False - h_boot2 = layers.data( + h_boot2 = paddle.static.data( shape=[self.batch_size, self.input_dim], dtype='float32', name='h_boot2', - append_batch_size=False, ) h_boot2.stop_gradient = False @@ -490,11 +485,10 @@ class EagerDeletionRecurrentOpNoMemBootTest(EagerDeletionRecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False @@ -570,11 +564,10 @@ class EagerDeletionTwoRecurrentOpsTest(EagerDeletionRecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False @@ -673,15 +666,14 @@ class EagerDeletionFarwardOnlyRnnAndBackwardRnnTest( self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): - x 
= layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype='float32', name='h_boot' + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype='float32', name='h_boot' ) h_boot.stop_gradient = False diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index f2b5f667fd..097b5de0b9 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -66,15 +66,9 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): else 1 ) - d0 = layers.data( - "d0", shape=[10], append_batch_size=False, dtype='float32' - ) - d1 = layers.data( - "d1", shape=[10], append_batch_size=False, dtype='float32' - ) - d2 = layers.data( - "d2", shape=[10], append_batch_size=False, dtype='float32' - ) + d0 = paddle.static.data("d0", shape=[-1, 10], dtype='float32') + d1 = paddle.static.data("d1", shape=[-1, 10], dtype='float32') + d2 = paddle.static.data("d2", shape=[-1, 10], dtype='float32') i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py index 4be4ddd228..2c5da64817 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_nn_grad.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers class TestElementwiseMulDoubleGradCheck(unittest.TestCase): @@ -32,8 +31,8 @@ class TestElementwiseMulDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.multiply(x, y) @@ -61,8 +60,8 @@ class TestElementwiseMulBroadcastDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._multiply_with_axis(x, y, axis=0) @@ -90,8 +89,8 @@ class TestElementwiseAddDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.add(x, y) @@ -119,8 +118,8 @@ class TestElementwiseAddBroadcastDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._add_with_axis(x, y, axis=0) @@ -151,8 +150,8 @@ class TestElementwiseSubDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = 
layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.subtract(x, y) @@ -187,8 +186,8 @@ class TestElementwiseSubBroadcastDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._subtract_with_axis(x, y, axis=0) @@ -219,8 +218,8 @@ class TestElementwiseDivDoubleGradCheck(unittest.TestCase): eps = 0.0001 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._divide_with_axis(x, y, axis=0) @@ -257,8 +256,8 @@ class TestElementwiseDivBroadcastDoubleGradCheck(unittest.TestCase): eps = 0.0001 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[1:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[1:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._divide_with_axis(x, y, axis=1) @@ -287,8 +286,8 @@ class TestElementwiseAddTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.add(x, y) @@ -316,8 +315,8 @@ class TestElementwiseAddBroadcastTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._add_with_axis(x, y, axis=0) @@ -348,8 +347,8 @@ class TestElementwiseMulTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape, False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape, dtype) x.persistable = True y.persistable = True out = paddle.multiply(x, y) @@ -384,8 +383,8 @@ class TestElementwiseMulBroadcastTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float64 - x = layers.data('x', shape, False, dtype) - y = layers.data('y', shape[:-1], False, dtype) + x = paddle.static.data('x', shape, dtype) + y = paddle.static.data('y', shape[:-1], dtype) x.persistable = True y.persistable = True out = paddle.tensor.math._add_with_axis(x, y, axis=0) diff --git a/python/paddle/fluid/tests/unittests/test_entry_attr.py b/python/paddle/fluid/tests/unittests/test_entry_attr.py index 07d2ab7fa8..6cc7606a7a 100644 --- a/python/paddle/fluid/tests/unittests/test_entry_attr.py +++ b/python/paddle/fluid/tests/unittests/test_entry_attr.py @@ -67,12 +67,8 @@ class EntryAttrChecks(unittest.TestCase): with fluid.scope_guard(scope): with fluid.program_guard(prog): - input = fluid.layers.data( - name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + input = paddle.static.data( + name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1 ) prob = 
ProbabilityEntry(0.5) emb = paddle.static.nn.sparse_embedding( diff --git a/python/paddle/fluid/tests/unittests/test_entry_attr2.py b/python/paddle/fluid/tests/unittests/test_entry_attr2.py index 5db31f906f..d06e248f90 100644 --- a/python/paddle/fluid/tests/unittests/test_entry_attr2.py +++ b/python/paddle/fluid/tests/unittests/test_entry_attr2.py @@ -28,12 +28,8 @@ class EntryAttrChecks(unittest.TestCase): with fluid.scope_guard(scope): with fluid.program_guard(prog): - input = fluid.layers.data( - name="dnn_data", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + input = paddle.static.data( + name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1 ) emb = fluid.layers.embedding( input=input, diff --git a/python/paddle/fluid/tests/unittests/test_exception.py b/python/paddle/fluid/tests/unittests/test_exception.py index aca120b48f..57faeaacc0 100644 --- a/python/paddle/fluid/tests/unittests/test_exception.py +++ b/python/paddle/fluid/tests/unittests/test_exception.py @@ -40,8 +40,8 @@ class TestExceptionNoCStack(unittest.TestCase): fluid.set_flags({'FLAGS_call_stack_level': 1}) def test_exception_in_static_mode(self): - x = fluid.layers.data(name='X', shape=[-1, 13], dtype='float32') - y = fluid.layers.data(name='Y', shape=[-1, 1], dtype='float32') + x = paddle.static.data(name='X', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='Y', shape=[-1, 1], dtype='float32') predict = paddle.static.nn.fc(x, size=1) loss = paddle.nn.functional.square_error_cost(input=predict, label=y) avg_loss = paddle.mean(loss) diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index d1c822b582..f9bb4286ad 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -18,20 +18,19 @@ import numpy as np import paddle from paddle.fluid.executor import Executor -from paddle.fluid.layers import data, zeros +from paddle.fluid.layers import zeros +from paddle.static import data from paddle.tensor import array_write class TestExecutor(unittest.TestCase): def test_mul(self): i = zeros(shape=[1], dtype='int64') - a = data(name='a', shape=[784], dtype='float32') + a = data(name='a', shape=[-1, 784], dtype='float32') array = array_write(x=a, i=i) i = paddle.increment(i) - b = data( - name='b', shape=[784, 100], dtype='float32', append_batch_size=False - ) + b = data(name='b', shape=[784, 100], dtype='float32') array_write(x=b, i=i, array=array) i = paddle.increment(i) diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py index 623f2065bc..fe9d09cb54 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_use_program_cache.py @@ -26,13 +26,10 @@ class TestExecutor(unittest.TestCase): main_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - a = fluid.layers.data(name='a', shape=[784], dtype='float32') - b = fluid.layers.data( - name='b', - shape=[784, 100], - dtype='float32', - append_batch_size=False, - ) + a = paddle.static.data(name='a', shape=[-1, 784], dtype='float32') + b = paddle.static.data(name='b', shape=[784, 100], dtype='float32') + a.desc.set_need_check_feed(False) + b.desc.set_need_check_feed(False) output = paddle.matmul(x=a, y=b) # 
Compute with numpy diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py index fccfab3e4c..8fb2110bfe 100755 --- a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py @@ -100,10 +100,10 @@ class TestExpandAsOpRank5(TestExpandAsBasic): class TestExpandAsV2Error(unittest.TestCase): def test_errors(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - x1 = fluid.layers.data(name='x1', shape=[4], dtype="uint8") - x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") + x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32") self.assertRaises(TypeError, paddle.tensor.expand_as, x1, x2) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.expand_as, x3, x2) @@ -113,14 +113,11 @@ class TestExpandAsV2API(unittest.TestCase): def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" - ) + x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") - y = fluid.layers.data( + y = paddle.static.data( name='target_tensor', shape=[2, 12, 14], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index 8839def692..0a5eda417e 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -21,7 +21,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import Program, core, program_guard @@ -194,9 +193,9 @@ class TestExpandV2Error(unittest.TestCase): ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) - x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.tensor.expand, x2, shape) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.expand, x3, shape) @@ -205,15 +204,12 @@ class TestExpandV2Error(unittest.TestCase): class TestExpandV2API(unittest.TestCase): def test_api(self): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32" - ) + x = paddle.static.data(name='x', shape=[12, 14], dtype="float32") positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data( + expand_shape = paddle.static.data( name="expand_shape", shape=[2], - append_batch_size=False, dtype="int32", ) @@ -273,7 +269,7 @@ class TestExpandDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.expand(data, [2, 3]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -304,7 +300,7 @@ class TestExpandTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = 
np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.expand(data, [2, 3]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index b07fc1d000..03ef6c3db6 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -146,10 +146,9 @@ class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase): with program_guard(main_program, startup_program): input = np.random.random([2, 2, 25]).astype("float32") - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[2, 2, 25], - append_batch_size=False, dtype="float32", ) @@ -191,13 +190,13 @@ class TestFCOpError(unittest.TestCase): def test_type(): # dtype must be float32 or float64 - x2 = fluid.layers.data(name='x2', shape=[4], dtype='int32') + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype='int32') paddle.static.nn.fc(x=x2, size=1) self.assertRaises(TypeError, test_type) # The input dtype of fc can be float16 in GPU, test for warning - x3 = fluid.layers.data(name='x3', shape=[4], dtype='float16') + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype='float16') paddle.static.nn.fc(x=x3, size=1) diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py index b642e2524e..0e5330014d 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py +++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py @@ -30,7 +30,7 @@ np.random.seed(123) class TestFeedData(unittest.TestCase): ''' Test paddle.fluid.data feeds with different shape and types. - Note: paddle.fluid.data is not paddle.fluid.layers.data. + Note: paddle.fluid.data is not the removed paddle.fluid.layers.data.
''' def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py index d93ee36b6e..f7313f93e0 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py @@ -57,11 +57,11 @@ class TestFetchUnmerged(unittest.TestCase): def build_program(self, main, startup, is_test): with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - img = fluid.layers.data( - name='image', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='image', shape=[-1, 1, 28, 28], dtype='float32' ) - label = fluid.layers.data( - name='label', shape=[1], dtype='int64' + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) loss, prediction = self.conv_net(img, label) if not is_test: diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py index 5e1af99259..38ef037974 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_op.py +++ b/python/paddle/fluid/tests/unittests/test_fill_constant_op.py @@ -379,7 +379,7 @@ class TestFillConstantOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # for ci coverage - x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16") + x1 = paddle.static.data(name='x1', shape=[-1, 1], dtype="int16") self.assertRaises( TypeError, fluid.layers.fill_constant, @@ -399,7 +399,7 @@ class TestFillConstantOpError(unittest.TestCase): # The argument dtype of fill_constant_op must be one of bool, float16, # float32, float64, uint8, int16, int32 or int64 - x2 = fluid.layers.data(name='x2', shape=[1], dtype="int32") + x2 = paddle.static.data(name='x2', shape=[-1, 1], dtype="int32") self.assertRaises( TypeError, diff --git a/python/paddle/fluid/tests/unittests/test_fleet.py b/python/paddle/fluid/tests/unittests/test_fleet.py index bc5a083d17..736f68be35 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet.py +++ b/python/paddle/fluid/tests/unittests/test_fleet.py @@ -52,12 +52,11 @@ class TestFleet1(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( + show = paddle.static.data( name="show", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) emb = fluid.layers.embedding( input=show, @@ -71,12 +70,11 @@ class TestFleet1(unittest.TestCase): input=bow, epsilon=1e-4, name="norm" ) fc = paddle.static.nn.fc(x=bow, size=1, activation=None) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) label_cast = fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_api_input.py b/python/paddle/fluid/tests/unittests/test_fleet_api_input.py index 12acfdf763..b57a30d752 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_api_input.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_api_input.py @@ -61,7 +61,7 @@ class FleetTest(unittest.TestCase): self.assertRaises(Exception, fleet.split_files, "files") self.assertRaises(Exception, fleet.init, "pserver") - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = 
paddle.mean(hidden) adam = fluid.optimizer.Adam() @@ -176,7 +176,7 @@ class TranspilerOptimizerTest(unittest.TestCase): transpiler = TranspilerOptimizer(fluid.optimizer.Adam(0.001)) self.assertRaises(Exception, transpiler.minimize, loss=[]) - data = fluid.layers.data(name='X', shape=[1], dtype='float32') + data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) self.assertRaises( diff --git a/python/paddle/fluid/tests/unittests/test_fleet_auto.py b/python/paddle/fluid/tests/unittests/test_fleet_auto.py index 1e30f703ff..b9b18f04fa 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_auto.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_auto.py @@ -32,10 +32,8 @@ class TestDistributedStrategyAuto(unittest.TestCase): def test_distributed_strategy_auto(self): fleet.init(is_collective=True) - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py index 4a56f8913a..ee5a84d1e4 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py @@ -39,13 +39,11 @@ class TestFleetBase(unittest.TestCase): os.environ["TRAINING_ROLE"] = "TRAINER" os.environ["PADDLE_TRAINER_ID"] = "1" - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_slot = paddle.static.data( + name="slot", shape=[-1, 1], dtype='int64' ) - input_slot = paddle.fluid.layers.data( - name="slot", shape=[1], dtype='int64' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') emb = paddle.fluid.layers.embedding( input=input_slot, size=[10, 9], is_sparse=True diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py index 30cdf5fbed..e24beee28e 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py @@ -32,10 +32,8 @@ class TestFleetBase_1(unittest.TestCase): ] = "127.0.0.1:36001,127.0.0.2:36001" def test_collective_minimize(self): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') @@ -63,10 +61,8 @@ class TestFleetBase(unittest.TestCase): ] = "127.0.0.1:36001,127.0.0.2:36001" def test_fleet_get_applied_optimizer(self): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32' - ) - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') + input_y = 
paddle.static.data(name="y", shape=[-1, 1], dtype='int64') fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh') fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh') diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor.py b/python/paddle/fluid/tests/unittests/test_fleet_executor.py index 400009f820..d798ffb016 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor.py @@ -45,12 +45,14 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data( - name='x', shape=x_data.shape, dtype=x_data.dtype + x = paddle.static.data( + name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) - y = fluid.layers.data( - name='y', shape=y_data.shape, dtype=y_data.dtype + x.desc.set_need_check_feed(False) + y = paddle.static.data( + name='y', shape=[-1] + list(y_data.shape), dtype=y_data.dtype ) + y.desc.set_need_check_feed(False) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py index b2a1c488dc..53bd4a20bc 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_multi_devices.py @@ -27,7 +27,9 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data(name='x', shape=[1], dtype=paddle.float32) + x = paddle.static.data( + name='x', shape=[-1, 1], dtype=paddle.float32 + ) empty_program._pipeline_opt = { "fleet_opt": fleet_opt, "section_program": empty_program, diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py index d24348b7d7..726687d87d 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_origin_scheduler.py @@ -45,12 +45,14 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data( - name='x', shape=x_data.shape, dtype=x_data.dtype + x = paddle.static.data( + name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) - y = fluid.layers.data( - name='y', shape=y_data.shape, dtype=y_data.dtype + x.desc.set_need_check_feed(False) + y = paddle.static.data( + name='y', shape=[-1] + list(y_data.shape), dtype=y_data.dtype ) + y.desc.set_need_check_feed(False) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py index 46eb0dc6f0..8c12fb9735 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_executor_with_task_nodes.py @@ -28,12 +28,14 @@ class TestFleetExecutor(unittest.TestCase): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() with fluid.program_guard(empty_program, empty_program): - x = fluid.layers.data( - name='x', 
shape=x_data.shape, dtype=x_data.dtype + x = paddle.static.data( + name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) - y = fluid.layers.data( - name='y', shape=y_data.shape, dtype=y_data.dtype + x.desc.set_need_check_feed(False) + y = paddle.static.data( + name='y', shape=[-1] + list(y_data.shape), dtype=y_data.dtype ) + y.desc.set_need_check_feed(False) z = x + y a = 2 * x + 3 * y loss = paddle.mean(a) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py b/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py index c651a456fd..09a9db8ccd 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py @@ -52,12 +52,11 @@ class TestFleet1(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( + show = paddle.static.data( name="show", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) emb = fluid.layers.embedding( input=show, @@ -67,12 +66,11 @@ class TestFleet1(unittest.TestCase): param_attr=fluid.ParamAttr(name="embedding"), ) fc = paddle.static.nn.fc(x=emb, size=1, activation=None) - label = fluid.layers.data( + label = paddle.static.data( name="click", shape=[-1, 1], dtype="int64", lod_level=1, - append_batch_size=False, ) label_cast = fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py index 4c3c321ac0..893956587e 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker.py @@ -82,20 +82,12 @@ class TestCloudRoleMaker(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( - name="show", - shape=[-1, 1], - dtype="float32", - lod_level=1, - append_batch_size=False, + show = paddle.static.data( + name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) fc = paddle.static.nn.fc(x=show, size=1, activation=None) - label = fluid.layers.data( - name="click", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + label = paddle.static.data( + name="click", shape=[-1, 1], dtype="int64", lod_level=1 ) label_cast = fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py index 50a6013e2d..ece84ed1d5 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py @@ -63,20 +63,12 @@ class TestCloudRoleMaker2(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( - name="show", - shape=[-1, 1], - dtype="float32", - lod_level=1, - append_batch_size=False, + show = paddle.static.data( + name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) fc = paddle.static.nn.fc(x=show, size=1, activation=None) - label = fluid.layers.data( - name="click", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + label = paddle.static.data( + name="click", shape=[-1, 1], dtype="int64", lod_level=1 ) label_cast = 
fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py index 27cb171c0d..94e293978b 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_3.py @@ -56,20 +56,12 @@ class TestCloudRoleMaker(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( - name="show", - shape=[-1, 1], - dtype="float32", - lod_level=1, - append_batch_size=False, + show = paddle.static.data( + name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) fc = paddle.static.nn.fc(x=show, size=1, activation=None) - label = fluid.layers.data( - name="click", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + label = paddle.static.data( + name="click", shape=[-1, 1], dtype="int64", lod_level=1 ) label_cast = fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py b/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py index 178fcfa230..3eb24d9b40 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_unitaccessor.py @@ -52,12 +52,8 @@ class TestFleet1(unittest.TestCase): startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(train_program, startup_program): - show = fluid.layers.data( - name="show", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + show = paddle.static.data( + name="show", shape=[-1, 1], dtype="int64", lod_level=1 ) emb = fluid.layers.embedding( input=show, @@ -67,12 +63,8 @@ class TestFleet1(unittest.TestCase): param_attr=fluid.ParamAttr(name="embedding"), ) fc = paddle.static.nn.fc(x=emb, size=1, activation=None) - label = fluid.layers.data( - name="click", - shape=[-1, 1], - dtype="int64", - lod_level=1, - append_batch_size=False, + label = paddle.static.data( + name="click", shape=[-1, 1], dtype="int64", lod_level=1 ) label_cast = fluid.layers.cast(label, dtype='float32') cost = paddle.nn.functional.log_loss(fc, label_cast) diff --git a/python/paddle/fluid/tests/unittests/test_flip.py b/python/paddle/fluid/tests/unittests/test_flip.py index f7ebd3d1d5..1807199821 100644 --- a/python/paddle/fluid/tests/unittests/test_flip.py +++ b/python/paddle/fluid/tests/unittests/test_flip.py @@ -22,7 +22,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers class TestFlipOp_API(unittest.TestCase): @@ -145,7 +144,7 @@ class TestFlipDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [3, 2, 2], False, dtype) + data = paddle.static.data('data', [3, 2, 2], dtype) data.persistable = True out = paddle.flip(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -177,7 +176,7 @@ class TestFlipTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [3, 2, 2], False, dtype) + data = paddle.static.data('data', [3, 2, 2], dtype) data.persistable = True out = paddle.flip(data, [0, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git 
a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index 9e06745448..4d7fb60d46 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -21,8 +21,10 @@ import paddle.fluid as fluid class TestFuseBatchNormActPass(unittest.TestCase): def build_program(self, main_program, startup_program, use_cuda, seed=1): with fluid.program_guard(main_program, startup_program): - x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') - y = fluid.layers.data(name="y", shape=[1], dtype='int64') + x = paddle.static.data( + name='x', shape=[-1, 1, 28, 28], dtype='float32' + ) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') hidden1 = paddle.static.nn.conv2d( input=x, filter_size=3, diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py index a8d0f89c86..d981ccbe14 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py @@ -64,8 +64,10 @@ class TestFusedBnAddActAPI(unittest.TestCase): self, main_program, startup_program, use_cuda, seed=1 ): with fluid.program_guard(main_program, startup_program): - x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') - y = fluid.layers.data(name="y", shape=[1], dtype='int64') + x = paddle.static.data( + name='x', shape=[-1, 1, 28, 28], dtype='float32' + ) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') conv1_1 = paddle.static.nn.conv2d( input=x, filter_size=3, @@ -123,8 +125,10 @@ class TestFusedBnAddActAPI(unittest.TestCase): self, main_program, startup_program, use_cuda, seed=1 ): with fluid.program_guard(main_program, startup_program): - x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') - y = fluid.layers.data(name="y", shape=[1], dtype='int64') + x = paddle.static.data( + name='x', shape=[-1, 1, 28, 28], dtype='float32' + ) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') conv1_1 = paddle.static.nn.conv2d( input=x, filter_size=3, diff --git a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py index 388025fb2a..c4f576ce20 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py @@ -55,8 +55,8 @@ def sep_conv(input, channel, stride, filter, dilation=1, act=None): def simple_depthwise_net(use_feed): assert use_feed - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') hidden = paddle.reshape(img, (-1, 1, 28, 28)) for _ in range(4): hidden = sep_conv(hidden, channel=200, stride=2, filter=5) diff --git a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py index 3e3f0ca05a..460bf43bbe 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_fused_emb_seq_pool_op.py @@ -18,6 +18,7 @@ import unittest import numpy as np from op_test import OpTest, skip_check_grad_ci 
+import paddle import paddle.version as ver @@ -108,8 +109,8 @@ class TestFusedEmbeddingSeqPoolApi(unittest.TestCase): import paddle.fluid as fluid dict_size = 20 - data_t = fluid.layers.data( - name='word', shape=[1], dtype='int64', lod_level=1 + data_t = paddle.static.data( + name='word', shape=[-1, 1], dtype='int64', lod_level=1 ) padding_idx = np.random.randint(1, 10) out = fluid.contrib.fused_embedding_seq_pool( diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py index 6a3f5cf855..6c2b6d15f6 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py @@ -160,20 +160,28 @@ class TestGatherNdOpWithHighRankDiff(OpTest): # Test Python API class TestGatherNdOpAPI(unittest.TestCase): def test_case1(self): - x1 = fluid.layers.data( - name='x1', shape=[30, 40, 50, 60], dtype='float32' + x1 = paddle.static.data( + name='x1', shape=[-1, 30, 40, 50, 60], dtype='float32' + ) + index1 = paddle.static.data( + name='index1', shape=[-1, 2, 4], dtype='int32' ) - index1 = fluid.layers.data(name='index1', shape=[2, 4], dtype='int32') output1 = paddle.gather_nd(x1, index1) def test_case2(self): - x2 = fluid.layers.data(name='x2', shape=[30, 40, 50], dtype='float32') - index2 = fluid.layers.data(name='index2', shape=[2, 2], dtype='int64') + x2 = paddle.static.data( + name='x2', shape=[-1, 30, 40, 50], dtype='float32' + ) + index2 = paddle.static.data( + name='index2', shape=[-1, 2, 2], dtype='int64' + ) output2 = paddle.gather_nd(x2, index2) def test_case3(self): - x3 = fluid.layers.data(name='x3', shape=[3, 4, 5], dtype='float32') - index3 = fluid.layers.data(name='index3', shape=[2, 1], dtype='int32') + x3 = paddle.static.data(name='x3', shape=[-1, 3, 4, 5], dtype='float32') + index3 = paddle.static.data( + name='index3', shape=[-1, 2, 1], dtype='int32' + ) output3 = paddle.gather_nd(x3, index3, name="gather_nd_layer") @@ -182,11 +190,11 @@ class TestGatherNdOpRaise(unittest.TestCase): def test_check_raise(self): def check_raise_is_test(): try: - x = fluid.layers.data( - name='x', shape=[3, 4, 5], dtype='float32' + x = paddle.static.data( + name='x', shape=[-1, 3, 4, 5], dtype='float32' ) - index = fluid.layers.data( - name='index', shape=[2, 10], dtype='int32' + index = paddle.static.data( + name='index', shape=[-1, 2, 10], dtype='int32' ) output = paddle.gather_nd(x, index) except Exception as e: @@ -231,13 +239,15 @@ class TestGatherNdError(unittest.TestCase): class TestGatherNdAPI2(unittest.TestCase): def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64') - index = fluid.layers.data('index', shape=[-1, 1], dtype='int32') + data1 = paddle.static.data('data1', shape=[-1, 2], dtype='float64') + data1.desc.set_need_check_feed(False) + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') + index.desc.set_need_check_feed(False) out = paddle.gather_nd(data1, index) place = fluid.CPUPlace() exe = fluid.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) - index_1 = np.array([[1]]) + index_1 = np.array([[1]]).astype('int32') (result,) = exe.run( feed={"data1": input, "index": index_1}, fetch_list=[out] ) diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 44ab250c76..2f2538769a 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -225,8 +225,10 @@ class TestGatherOp4(TestGatherOp1): class API_TestGather(unittest.TestCase): def test_out1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64') - index = fluid.layers.data('index', shape=[-1, 1], dtype='int32') + data1 = paddle.static.data('data1', shape=[-1, 2], dtype='float64') + data1.desc.set_need_check_feed(False) + index = paddle.static.data('index', shape=[-1, 1], dtype='int32') + index.desc.set_need_check_feed(False) out = paddle.gather(data1, index) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_gather_tree_op.py b/python/paddle/fluid/tests/unittests/test_gather_tree_op.py index bcd319ed2d..79580339a0 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_tree_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_tree_op.py @@ -18,7 +18,6 @@ import numpy as np from op_test import OpTest import paddle -import paddle.fluid as fluid from paddle.fluid.framework import Program, program_guard @@ -58,14 +57,11 @@ class TestGatherTreeOp(OpTest): class TestGatherTreeOpAPI(unittest.TestCase): def test_case(self): paddle.enable_static() - ids = fluid.layers.data( - name='ids', shape=[5, 2, 2], dtype='int64', append_batch_size=False - ) - parents = fluid.layers.data( + ids = paddle.static.data(name='ids', shape=[5, 2, 2], dtype='int64') + parents = paddle.static.data( name='parents', shape=[5, 2, 2], dtype='int64', - append_batch_size=False, ) final_sequences = paddle.nn.functional.gather_tree(ids, parents) paddle.disable_static() @@ -84,17 +80,9 @@ class TestGatherTreeOpError(unittest.TestCase): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - ids = fluid.layers.data( - name='ids', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False, - ) - parents = fluid.layers.data( - name='parents', - shape=[5, 2, 2], - dtype='int64', - append_batch_size=False, + ids = paddle.static.data(name='ids', shape=[5, 2, 2], dtype='int64') + parents = paddle.static.data( + name='parents', shape=[5, 2, 2], dtype='int64' ) def test_Variable_ids(): @@ -113,11 +101,8 @@ class TestGatherTreeOpError(unittest.TestCase): def test_type_ids(): # dtype must be int32 or int64 - bad_ids = fluid.layers.data( - name='bad_ids', - shape=[5, 2, 2], - dtype='float32', - append_batch_size=False, + bad_ids = paddle.static.data( + name='bad_ids', shape=[5, 2, 2], dtype='float32' ) paddle.nn.functional.gather_tree(bad_ids, parents) @@ -125,33 +110,24 @@ class TestGatherTreeOpError(unittest.TestCase): def test_type_parents(): # dtype must be int32 or int64 - bad_parents = fluid.layers.data( - name='bad_parents', - shape=[5, 2, 2], - dtype='float32', - append_batch_size=False, + bad_parents = paddle.static.data( + name='bad_parents', shape=[5, 2, 2], dtype='float32' ) paddle.nn.functional.gather_tree(ids, bad_parents) self.assertRaises(TypeError, test_type_parents) def test_ids_ndim(): - bad_ids = fluid.layers.data( - name='bad_test_ids', - shape=[5, 2], - dtype='int64', - append_batch_size=False, + bad_ids = paddle.static.data( + name='bad_test_ids', shape=[5, 2], dtype='int64' ) paddle.nn.functional.gather_tree(bad_ids, parents) self.assertRaises(ValueError, test_ids_ndim) def test_parents_ndim(): - bad_parents = fluid.layers.data( - name='bad_test_parents', - shape=[5, 2], - dtype='int64', - append_batch_size=False, + bad_parents = paddle.static.data( + 
name='bad_test_parents', shape=[5, 2], dtype='int64' ) paddle.nn.functional.gather_tree(ids, bad_parents) diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py index 6d7b991425..fcbc91edee 100644 --- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py @@ -43,10 +43,12 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): with fluid.unique_name.guard(): with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data( - name='image', shape=[784], dtype='float32' + image = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') py_reader = fluid.io.DataLoader.from_generator( feed_list=[image, label], capacity=4, diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py index 52a233658a..01cd8108a3 100644 --- a/python/paddle/fluid/tests/unittests/test_group_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -52,8 +52,8 @@ class TestGroupNormOpError(unittest.TestCase): self.assertRaises(TypeError, test_x_type) def test_x_dtype(): - x2 = fluid.layers.data( - name='x2', shape=[2, 100, 3, 5], dtype='int32' + x2 = paddle.static.data( + name='x2', shape=[-1, 2, 100, 3, 5], dtype='int32' ) groups = 2 paddle.static.nn.group_norm(x2, groups) diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py index 6c5bc338a5..75e5d1ee2e 100644 --- a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -286,14 +286,14 @@ class TestHSigmoidOpSparse(OpTest): class TestHSigmoidOpWithSparseGrad(unittest.TestCase): def hs_net_conf(self, is_sparse): - input_word = fluid.layers.data(name="x", shape=[1], dtype='int64') - path_table = fluid.layers.data( - name='path_table', shape=[3], dtype='int64' + input_word = paddle.static.data(name="x", shape=[-1, 1], dtype='int64') + path_table = paddle.static.data( + name='path_table', shape=[-1, 3], dtype='int64' ) - path_code = fluid.layers.data( - name='path_code', shape=[3], dtype='int64' + path_code = paddle.static.data( + name='path_code', shape=[-1, 3], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') data_list = [input_word, path_table, path_code, label] diff --git a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py index 1a0ff98b17..d9b8ee8fad 100644 --- a/python/paddle/fluid/tests/unittests/test_identity_loss_op.py +++ b/python/paddle/fluid/tests/unittests/test_identity_loss_op.py @@ -103,7 +103,7 @@ class TestIdentityLossOpError(unittest.TestCase): self.assertRaises(Exception, test_string) def test_dtype(): - x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32') + x2 = paddle.static.data(name='x2', shape=[-1, 1], dtype='int32') paddle.incubate.identity_loss(x=x2, reduction=1) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py index 
c485a51828..7ffa8f5e53 100644 --- a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py +++ b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py @@ -39,8 +39,8 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data( - name='pixel', shape=[3, 48, 48], dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) hidden1 = paddle.static.nn.batch_norm(input=images) hidden2 = paddle.static.nn.fc( @@ -54,8 +54,8 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data( - name='pixel', shape=[3, 48, 48], dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) paddle.nn.functional.dropout(x=images, p=0.5) @@ -66,8 +66,8 @@ class TestLayer(unittest.TestCase): startup_program = Program() with fluid.program_guard(main_program, startup_program): - images = fluid.layers.data( - name='pixel', shape=[3, 48, 48], dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) conv1 = conv_block(images, 64, 2, [0.3, 0]) conv_block(conv1, 256, 3, [0.4, 0.4, 0]) @@ -78,11 +78,11 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() with fluid.program_guard(main_program, startup_program): - image1 = fluid.layers.data( - name='pixel1', shape=[3, 48, 48], dtype='float32' + image1 = paddle.static.data( + name='pixel1', shape=[-1, 3, 48, 48], dtype='float32' ) - image2 = fluid.layers.data( - name='pixel2', shape=[3, 48, 48], dtype='float32' + image2 = paddle.static.data( + name='pixel2', shape=[-1, 3, 48, 48], dtype='float32' ) paddle.nn.functional.relu(paddle.add(x=image1, y=image2)) print(main_program) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index 21f95491fa..f34c8d6a2a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -264,9 +264,9 @@ class TestDygraphDeepCF(unittest.TestCase): scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): - users = fluid.layers.data('users', [1], dtype='int32') - items = fluid.layers.data('items', [1], dtype='int32') - labels = fluid.layers.data('labels', [1], dtype='float32') + users = paddle.static.data('users', [-1, 1], dtype='int32') + items = paddle.static.data('items', [-1, 1], dtype='int32') + labels = paddle.static.data('labels', [-1, 1], dtype='float32') deepcf = DeepCF(num_users, num_items, matrix) prediction = deepcf(users, items) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_framework.py b/python/paddle/fluid/tests/unittests/test_imperative_framework.py index c0676fcc85..a14a508c68 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_framework.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_framework.py @@ -56,9 +56,7 @@ class TestDygraphFramework(unittest.TestCase): def test_dygraph_backward(self): with new_program_scope(): mlp = MLP(input_size=2) - var_inp = fluid.layers.data( - "input", shape=[2, 2], dtype="float32", append_batch_size=False - ) + var_inp = paddle.static.data("input", shape=[2, 2], dtype="float32") out = mlp(var_inp) try: out.backward() 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 0ab0703b16..34806a8305 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -70,12 +70,8 @@ class TestDygraphGAN(unittest.TestCase): discriminator = Discriminator() generator = Generator() - img = fluid.layers.data( - name="img", shape=[2, 1], append_batch_size=False - ) - noise = fluid.layers.data( - name="noise", shape=[2, 2], append_batch_size=False - ) + img = paddle.static.data(name="img", shape=[2, 1]) + noise = paddle.static.data(name="noise", shape=[2, 2]) d_real = discriminator(img) d_loss_real = paddle.mean( @@ -106,9 +102,7 @@ class TestDygraphGAN(unittest.TestCase): discriminator = Discriminator() generator = Generator() - noise = fluid.layers.data( - name="noise", shape=[2, 2], append_batch_size=False - ) + noise = paddle.static.data(name="noise", shape=[2, 2]) d_fake = discriminator(generator(noise)) g_loss = paddle.mean( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py index 54dffdf723..25eafb2daa 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py @@ -71,24 +71,15 @@ class TestDygraphGNN(unittest.TestCase): scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): - features = fluid.layers.data( - name='features', - shape=[1, 100, 50], - dtype='float32', - append_batch_size=False, + features = paddle.static.data( + name='features', shape=[1, 100, 50], dtype='float32' ) # Use selected rows when it's supported. - adj = fluid.layers.data( - name='adj', - shape=[1, 100, 100], - dtype='float32', - append_batch_size=False, + adj = paddle.static.data( + name='adj', shape=[1, 100, 100], dtype='float32' ) - labels = fluid.layers.data( - name='labels', - shape=[100, 1], - dtype='int64', - append_batch_size=False, + labels = paddle.static.data( + name='labels', shape=[100, 1], dtype='int64' ) model = GCN('test_gcn', 50) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py index 0a775f75e7..0eb037bc6a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py @@ -163,11 +163,12 @@ class TestDygraphSimpleNet(unittest.TestCase): exe = fluid.Executor(place) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype) - + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype=dtype) + y.desc.set_need_check_feed(False) static_loss = simple_net(x, y) sgd.minimize(static_loss) static_param_updated = dict() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py index 0e813147ad..9c187d73b8 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py @@ -195,10 +195,12 @@ class TestImperativeMnist(unittest.TestCase): drop_last=True, ) - img = fluid.layers.data( - name='pixel', shape=[1, 28, 
28], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) loss = paddle.nn.functional.cross_entropy( cost, label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py index 64594acb28..3acf627661 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py @@ -98,10 +98,12 @@ class TestImperativeMnistSortGradient(unittest.TestCase): paddle.dataset.mnist.train(), batch_size=128, drop_last=True ) - img = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) loss = paddle.nn.functional.cross_entropy( cost, label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 6217ef0b1b..12be3af2d9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -537,15 +537,19 @@ class TestDygraphOCRAttention(unittest.TestCase): optimizer = fluid.optimizer.SGD(learning_rate=0.001) - images = fluid.layers.data( - name='pixel', shape=Config.DATA_SHAPE, dtype='float32' + images = paddle.static.data( + name='pixel', shape=[-1] + Config.DATA_SHAPE, dtype='float32' ) - static_label_in = fluid.layers.data( - name='label_in', shape=[1], dtype='int64', lod_level=0 + images.desc.set_need_check_feed(False) + static_label_in = paddle.static.data( + name='label_in', shape=[-1, 1], dtype='int64', lod_level=0 ) - static_label_out = fluid.layers.data( - name='label_out', shape=[1], dtype='int64', lod_level=0 + static_label_in.desc.set_need_check_feed(False) + static_label_out = paddle.static.data( + name='label_out', shape=[-1, 1], dtype='int64', lod_level=0 ) + static_label_out.desc.set_need_check_feed(False) + static_label_out.stop_gradient = True static_label_out.trainable = False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index ab14905880..31f066a1f1 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -173,10 +173,12 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.dataset.mnist.train(), batch_size=128, drop_last=True ) - img = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') img = paddle.reshape(img, shape=[batch_size, 784]) cost = mlp(img) avg_loss = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py 
b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 2246cc25a2..d038e89e1f 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -182,10 +182,12 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.dataset.mnist.train(), batch_size=128, drop_last=True ) - img = fluid.layers.data( - name='pixel', shape=[1, 28, 28], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 1, 28, 28], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') img = paddle.reshape(img, shape=[batch_size, 784]) cost = mlp(img) avg_loss = paddle.mean(cost) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index b24b24ebc6..6eb0c9d6e6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -328,16 +328,20 @@ class TestDygraphPtbRnn(unittest.TestCase): else fluid.CUDAPlace(0) ) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py index 01b1d18070..a586c98de1 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn_sorted_gradient.py @@ -116,16 +116,20 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): else fluid.CUDAPlace(0) ) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps, 1], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell diff --git a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py 
b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py index e40cb63206..2d57340b95 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_recurrent_usage.py @@ -80,12 +80,8 @@ class TestRecurrentFeed(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - in1 = fluid.layers.data( - name="inp1", shape=[2, 2], append_batch_size=False - ) - in2 = fluid.layers.data( - name="inp2", shape=[2, 2], append_batch_size=False - ) + in1 = paddle.static.data(name="inp1", shape=[2, 2]) + in2 = paddle.static.data(name="inp2", shape=[2, 2]) rt1 = RecurrentTest("RecurrentTest") static_sum_out, static_out = rt1(in1, in2) fluid.backward.append_backward(static_sum_out) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py index 1c548f5cae..88440c0cf2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_reinforcement.py @@ -126,14 +126,14 @@ class TestImperativeMnist(unittest.TestCase): st_sgd = SGDOptimizer(learning_rate=1e-3) - st_state = fluid.layers.data( - name='st_state', shape=[4], dtype='float32' + st_state = paddle.static.data( + name='st_state', shape=[-1, 4], dtype='float32' ) - st_reward = fluid.layers.data( - name='st_reward', shape=[1], dtype='float32' + st_reward = paddle.static.data( + name='st_reward', shape=[-1, 1], dtype='float32' ) - st_mask = fluid.layers.data( - name='st_mask', shape=[2], dtype='float32' + st_mask = paddle.static.data( + name='st_mask', shape=[-1, 2], dtype='float32' ) st_loss_probs = policy(st_state) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index b20ceb091b..bc46ad12d3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -361,10 +361,12 @@ class TestDygraphResnet(unittest.TestCase): batch_size=batch_size, ) - img = fluid.layers.data( - name='pixel', shape=[3, 224, 224], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 3, 224, 224], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) loss = paddle.nn.functional.cross_entropy( input=out, label=label, reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index 798890a489..9d6d2ebabd 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -170,10 +170,12 @@ class TestDygraphResnetSortGradient(unittest.TestCase): batch_size=batch_size, ) - img = fluid.layers.data( - name='pixel', shape=[3, 224, 224], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 3, 224, 224], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) loss = paddle.nn.functional.cross_entropy( input=out, label=label, 
reduction='none', use_softmax=False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index b02e1595db..6c95805602 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -444,10 +444,12 @@ class TestImperativeResneXt(unittest.TestCase): drop_last=True, ) - img = fluid.layers.data( - name='pixel', shape=[3, 224, 224], dtype='float32' + img = paddle.static.data( + name='pixel', shape=[-1, 3, 224, 224], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = se_resnext(img) softmax_out = paddle.nn.functional.softmax(out) loss = paddle.nn.functional.cross_entropy( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py index cd707bb0ca..647710fba6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py @@ -173,11 +173,12 @@ class TestDygraphSimpleNet(unittest.TestCase): exe = fluid.Executor(place) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype) - + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype=dtype) + y.desc.set_need_check_feed(False) static_loss = simple_net(x, y) sgd.minimize(static_loss) static_param_updated = dict() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py index 8e213c2d2a..e171899289 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer_sorted_gradient.py @@ -221,14 +221,13 @@ def make_all_inputs(input_fields): """ inputs = [] for input_field in input_fields: - input_var = fluid.layers.data( + input_var = paddle.static.data( name=input_field, shape=input_descs[input_field][0], dtype=input_descs[input_field][1], lod_level=input_descs[input_field][2] if len(input_descs[input_field]) == 3 else 0, - append_batch_size=False, ) inputs.append(input_var) return inputs diff --git a/python/paddle/fluid/tests/unittests/test_index_select_op.py b/python/paddle/fluid/tests/unittests/test_index_select_op.py index 39895f2691..2234280939 100644 --- a/python/paddle/fluid/tests/unittests/test_index_select_op.py +++ b/python/paddle/fluid/tests/unittests/test_index_select_op.py @@ -97,10 +97,8 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data( - name='index', shape=[3], dtype='int32', append_batch_size=False - ) + x = paddle.static.data(name='x', shape=[-1, 4]) + index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index, axis=1) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -115,10 +113,8 @@ class TestIndexSelectAPI(unittest.TestCase): # case 2: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) -
index = fluid.layers.data( - name='index', shape=[3], dtype='int32', append_batch_size=False - ) + x = paddle.static.data(name='x', shape=[-1, 4]) + index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index f6b3ba6b69..fd8523bfc1 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -24,7 +24,6 @@ import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.executor as executor -import paddle.fluid.layers as layers import paddle.fluid.optimizer as optimizer from paddle.distributed.io import load_inference_model_distributed from paddle.fluid.compiler import CompiledProgram @@ -55,8 +54,8 @@ class TestBook(unittest.TestCase): program = Program() with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x=x, size=1, activation=None) @@ -163,8 +162,8 @@ class TestSaveInferenceModel(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -188,8 +187,8 @@ class TestSaveInferenceModel(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='int32') predict = paddle.static.nn.fc(x, size=2, activation='softmax') acc = paddle.static.accuracy(input=predict, label=y) auc_var, batch_auc_var, auc_states = paddle.static.auc( @@ -223,8 +222,8 @@ class TestInstance(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -261,8 +260,8 @@ class TestSaveInferenceModelNew(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -440,8 +439,8 @@ class TestSaveInferenceModelNew(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = 
paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -489,8 +488,8 @@ class TestSaveInferenceModelNew(unittest.TestCase): # fake program without feed/fetch with program_guard(program, init_program): - x = layers.data(name='x', shape=[2], dtype='float32') - y = layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) diff --git a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py index 299d3218cf..a4cc038bd7 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_abn_op.py @@ -48,13 +48,13 @@ class TestInplaceANBOpTraining(unittest.TestCase): startup.random_seed = seed with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data( + data = paddle.static.data( name='input', shape=self.dshape, dtype=self.dtype, - append_batch_size=False, - stop_gradient=False, ) + data.stop_gradient = False + data.desc.set_need_check_feed(False) bn = paddle.static.nn.batch_norm( data, diff --git a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py index 16477e086d..b614709ec9 100644 --- a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py +++ b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py @@ -37,18 +37,18 @@ class TestSoftmaxWithXe(unittest.TestCase): m, n = x.shape with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.scope_guard(fluid.Scope()): - x_d = fluid.layers.data( + x_d = paddle.static.data( name='x', shape=[m, n], dtype=self.dtype, - append_batch_size=False, ) - y_d = fluid.layers.data( + x_d.desc.set_need_check_feed(False) + y_d = paddle.static.data( name='y', shape=[m, 1] if not self.soft_label else [m, n], dtype='int64' if not self.soft_label else self.dtype, - append_batch_size=False, ) + y_d.desc.set_need_check_feed(False) z_d, s_d = paddle.nn.functional.softmax_with_cross_entropy( x_d, y_d, diff --git a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py index aac98fec5c..48932e5581 100644 --- a/python/paddle/fluid/tests/unittests/test_instance_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_instance_norm_op.py @@ -243,7 +243,9 @@ class TestInstanceNormOpError(unittest.TestCase): self.assertRaises(TypeError, paddle.static.nn.instance_norm, x1) # the input dtype of instance_norm must be float32 or float64 - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" + ) self.assertRaises(TypeError, paddle.static.nn.instance_norm, x2) @@ -251,9 +253,7 @@ class TestInstanceNormOpErrorCase1(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # the first dimension of input for instance_norm must between [2d, 5d] - x = fluid.layers.data( - name='x', shape=[3], dtype="float32", append_batch_size=False - ) + x = paddle.static.data(name='x', shape=[3], dtype="float32") self.assertRaises(ValueError, 
paddle.static.nn.instance_norm, x) diff --git a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py index 48156b7448..bc4ef3d386 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py @@ -24,8 +24,8 @@ import paddle.fluid.core as core def fc_with_batchnorm(use_feed): - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') hidden = img for _ in range(3): diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py index 9e51118e96..b64090996b 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py @@ -18,13 +18,12 @@ import numpy as np from parallel_executor_test_base import DeviceType, TestParallelExecutorBase import paddle -import paddle.fluid as fluid import paddle.fluid.core as core def _feed_data_helper(): - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') return img, label diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index f8f65f5a14..f7aa3f6fbd 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -123,8 +123,10 @@ class TestLambOpWithCombinedOp(unittest.TestCase): with fluid.program_guard(main, startup): main.random_seed = seed startup.random_seed = seed - x = fluid.layers.data(name='X', shape=[13], dtype='float32') - y = fluid.layers.data(name='Y', shape=[1], dtype='float32') + x = paddle.static.data( + name='X', shape=[-1, 13], dtype='float32' + ) + y = paddle.static.data(name='Y', shape=[-1, 1], dtype='float32') prediction = paddle.static.nn.fc(x, size=1, activation=None) loss = paddle.nn.functional.square_error_cost( input=prediction, label=y diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 0240b411b0..0878f46807 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -333,12 +333,7 @@ class TestLayerNormOp(unittest.TestCase): class TestLayerNormAPI(unittest.TestCase): def test_case(self): - x = fluid.layers.data( - name='x', - shape=[64, 32, 256], - dtype='float32', - append_batch_size=False, - ) + x = paddle.static.data(name='x', shape=[64, 32, 256], dtype='float32') x = paddle.static.nn.layer_norm( x, scale=True, @@ -380,7 +375,9 @@ class TestDygraphLayerNormAPIError(unittest.TestCase): # the input dtype of LayerNorm must be float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 32, 32], dtype="int32") + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 32, 32], dtype="int32" + ) self.assertRaises(TypeError, layer_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py 
b/python/paddle/fluid/tests/unittests/test_layers.py
index e33cde3179..51715e2ae1 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -108,11 +108,10 @@ class TestLayer(LayerTest):
     def test_dropout(self):
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():
-            t = layers.data(
+            t = paddle.static.data(
                 name='data',
                 shape=[3, 32, 32],
                 dtype='float32',
-                append_batch_size=False,
             )
             dropout = paddle.nn.Dropout(p=0.35)
             ret = dropout(t)
@@ -135,11 +134,8 @@ class TestLayer(LayerTest):
     def test_linear(self):
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():
-            t = layers.data(
-                name='data',
-                shape=[3, 32, 32],
-                dtype='float32',
-                append_batch_size=False,
+            t = paddle.static.data(
+                name='data', shape=[3, 32, 32], dtype='float32'
             )
             linear = paddle.nn.Linear(
                 32, 4, bias_attr=fluid.initializer.ConstantInitializer(value=1)
@@ -228,11 +224,8 @@ class TestLayer(LayerTest):
     def test_Flatten(self):
         inp = np.ones([3, 4, 4, 5], dtype='float32')
         with self.static_graph():
-            t = layers.data(
-                name='data',
-                shape=[3, 4, 4, 5],
-                dtype='float32',
-                append_batch_size=False,
+            t = paddle.static.data(
+                name='data', shape=[3, 4, 4, 5], dtype='float32'
             )
             flatten = paddle.nn.Flatten()
             ret = flatten(t)
@@ -277,7 +270,9 @@ class TestLayer(LayerTest):
     def test_SyncBatchNorm(self):
         if core.is_compiled_with_cuda():
             with self.static_graph():
-                t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32')
+                t = paddle.static.data(
+                    name='t', shape=[-1, 3, 5, 5], dtype='float32'
+                )
                 my_sync_bn = paddle.nn.SyncBatchNorm(3)
                 ret = my_sync_bn(t)
                 static_ret = self.get_static_graph_result(
@@ -294,7 +289,7 @@ class TestLayer(LayerTest):
 
     def test_relu(self):
         with self.static_graph():
-            t = layers.data(name='t', shape=[3, 3], dtype='float32')
+            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
             ret = F.relu(t)
             static_ret = self.get_static_graph_result(
                 feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
@@ -309,8 +304,10 @@ class TestLayer(LayerTest):
 
     def test_matmul(self):
         with self.static_graph():
-            t = layers.data(name='t', shape=[3, 3], dtype='float32')
-            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
+            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
+            t2 = paddle.static.data(
+                name='t2', shape=[-1, 3, 3], dtype='float32'
+            )
             ret = paddle.matmul(t, t2)
             static_ret = self.get_static_graph_result(
                 feed={
@@ -337,12 +334,22 @@ class TestLayer(LayerTest):
         n6 = np.ones([3, 3], dtype='float32') * 5
 
         with self.static_graph():
-            t = layers.data(name='t', shape=[3, 3], dtype='float32')
-            t2 = layers.data(name='t2', shape=[3, 3], dtype='float32')
-            t3 = layers.data(name='t3', shape=[3, 3], dtype='float32')
-            t4 = layers.data(name='t4', shape=[3, 3], dtype='float32')
-            t5 = layers.data(name='t5', shape=[3, 3], dtype='float32')
-            t6 = layers.data(name='t6', shape=[3, 3], dtype='float32')
+            t = paddle.static.data(name='t', shape=[-1, 3, 3], dtype='float32')
+            t2 = paddle.static.data(
+                name='t2', shape=[-1, 3, 3], dtype='float32'
+            )
+            t3 = paddle.static.data(
+                name='t3', shape=[-1, 3, 3], dtype='float32'
+            )
+            t4 = paddle.static.data(
+                name='t4', shape=[-1, 3, 3], dtype='float32'
+            )
+            t5 = paddle.static.data(
+                name='t5', shape=[-1, 3, 3], dtype='float32'
+            )
+            t6 = paddle.static.data(
+                name='t6', shape=[-1, 3, 3], dtype='float32'
+            )
 
             ret = paddle.add(t, t2)
             ret = paddle.pow(ret, t3)
@@ -381,7 +388,9 @@ class TestLayer(LayerTest):
     def test_conv2d_transpose(self):
         inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
         with self.static_graph():
-            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
+            img = paddle.static.data(
+                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
+            )
             out = paddle.static.nn.conv2d_transpose(
                 input=img,
                 num_filters=10,
@@ -393,7 +402,9 @@ class TestLayer(LayerTest):
                 feed={'pixel': inp_np}, fetch_list=[out]
             )[0]
         with self.static_graph():
-            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
+            img = paddle.static.data(
+                name='pixel', shape=[-1, 3, 2, 2], dtype='float32'
+            )
             conv2d_transpose = paddle.nn.Conv2DTranspose(
                 3,
                 10,
@@ -473,8 +484,8 @@ class TestLayer(LayerTest):
         # the input dtype of Conv2DTranspose must be float16 or float32 or float64
         # float16 only can be set on GPU place
         def test_type():
-            images = layers.data(
-                name='pixel', shape=[3, 5, 5], dtype='int32'
+            images = paddle.static.data(
+                name='pixel', shape=[-1, 3, 5, 5], dtype='int32'
             )
             conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
             conv2d_ret2 = conv2d(images)
@@ -486,12 +497,8 @@ class TestLayer(LayerTest):
         inp_np_y = np.array([[4, 5, 6]]).astype('float32')
 
         with self.static_graph():
-            data_x = layers.data(
-                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
-            )
-            data_y = layers.data(
-                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
-            )
+            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
+            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
             out = paddle.static.nn.common.bilinear_tensor_product(
                 data_x,
                 data_y,
@@ -505,12 +512,8 @@ class TestLayer(LayerTest):
             )[0]
 
         with self.static_graph():
-            data_x = layers.data(
-                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
-            )
-            data_y = layers.data(
-                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
-            )
+            data_x = paddle.static.data(name='x', shape=[1, 3], dtype="float32")
+            data_y = paddle.static.data(name='y', shape=[1, 3], dtype="float32")
             btp = paddle.nn.Bilinear(
                 3,
                 3,
@@ -542,11 +545,11 @@ class TestLayer(LayerTest):
         dy_rlt2_value = dy_rlt2.numpy()
 
         with self.static_graph():
-            data_x2 = layers.data(
-                name='x', shape=[1, 3], dtype="float32", append_batch_size=False
+            data_x2 = paddle.static.data(
+                name='x', shape=[1, 3], dtype="float32"
             )
-            data_y2 = layers.data(
-                name='y', shape=[1, 3], dtype="float32", append_batch_size=False
+            data_y2 = paddle.static.data(
+                name='y', shape=[1, 3], dtype="float32"
             )
             out2 = paddle.static.nn.common.bilinear_tensor_product(
                 data_x2, data_y2, 6, act='sigmoid'
@@ -599,7 +602,10 @@ class TestLayer(LayerTest):
         inp_word = np.array([[[1]]]).astype('int64')
         dict_size = 20
         with self.static_graph():
-            data_t = layers.data(name='word', shape=[1], dtype='int64')
+            data_t = paddle.static.data(
+                name='word', shape=[-1, 1], dtype='int64'
+            )
+            data_t.desc.set_need_check_feed(False)
             emb = layers.embedding(
                 input=data_t,
                 size=[dict_size, 32],
@@ -610,7 +616,10 @@ class TestLayer(LayerTest):
                 feed={'word': inp_word}, fetch_list=[emb]
             )[0]
         with self.static_graph():
-            data_t = layers.data(name='word', shape=[1], dtype='int64')
+            data_t = paddle.static.data(
+                name='word', shape=[-1, 1], dtype='int64'
+            )
+            data_t.desc.set_need_check_feed(False)
             emb2 = paddle.nn.Embedding(
                 dict_size, 32, weight_attr='emb.w', sparse=False
             )
@@ -693,8 +702,8 @@ class TestLayer(LayerTest):
 
     def test_conv3d(self):
         with self.static_graph():
-            images = layers.data(
-                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
+            images = paddle.static.data(
+                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
             )
             ret = paddle.static.nn.conv3d(
                 input=images, num_filters=3, filter_size=2
@@ -705,8 +714,8 @@ class TestLayer(LayerTest):
             )[0]
 
         with self.static_graph():
-            images = layers.data(
-                name='pixel', shape=[3, 6, 6, 6], dtype='float32'
+            images = paddle.static.data(
+                name='pixel', shape=[-1, 3, 6, 6, 6], dtype='float32'
             )
             conv3d = paddle.nn.Conv3D(
                 in_channels=3, out_channels=3, kernel_size=2
@@ -783,12 +792,8 @@ class TestLayer(LayerTest):
         input = np.random.random(shape).astype('float32')
 
         with self.static_graph():
-            X = fluid.layers.data(
-                name='X',
-                shape=shape,
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
+            X = paddle.static.data(
+                name='X', shape=shape, dtype='float32', lod_level=1
             )
             ret = paddle.static.nn.group_norm(
                 input=X,
@@ -807,12 +812,8 @@ class TestLayer(LayerTest):
             )[0]
 
         with self.static_graph():
-            X = fluid.layers.data(
-                name='X',
-                shape=shape,
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
+            X = paddle.static.data(
+                name='X', shape=shape, dtype='float32', lod_level=1
             )
             groupNorm = paddle.nn.GroupNorm(
                 num_channels=shape[1],
@@ -855,18 +856,14 @@ class TestLayer(LayerTest):
         input = np.random.random(shape).astype('float32')
 
         with self.static_graph():
-            X = fluid.layers.data(
-                name='X', shape=shape, dtype='float32', append_batch_size=False
-            )
+            X = paddle.static.data(name='X', shape=shape, dtype='float32')
             ret = paddle.static.nn.instance_norm(input=X)
             static_ret = self.get_static_graph_result(
                 feed={'X': input}, fetch_list=[ret]
             )[0]
 
         with self.static_graph():
-            X = fluid.layers.data(
-                name='X', shape=shape, dtype='float32', append_batch_size=False
-            )
+            X = paddle.static.data(name='X', shape=shape, dtype='float32')
             instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1])
             ret = instanceNorm(X)
             static_ret2 = self.get_static_graph_result(
@@ -914,12 +911,8 @@ class TestLayer(LayerTest):
         input = np.random.random(shape).astype('float32')
 
         with self.static_graph():
-            Weight = fluid.layers.data(
-                name='Weight',
-                shape=shape,
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
+            Weight = paddle.static.data(
+                name='Weight', shape=shape, dtype='float32', lod_level=1
             )
             ret = paddle.static.nn.spectral_norm(
                 weight=Weight, dim=1, power_iters=2
@@ -935,12 +928,8 @@ class TestLayer(LayerTest):
             )[0]
 
         with self.static_graph():
-            Weight = fluid.layers.data(
-                name='Weight',
-                shape=shape,
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
+            Weight = paddle.static.data(
+                name='Weight', shape=shape, dtype='float32', lod_level=1
             )
             spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2)
             ret = spectralNorm(Weight)
@@ -968,7 +957,9 @@ class TestLayer(LayerTest):
         )
 
         with self.static_graph():
-            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
+            img = paddle.static.data(
+                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
+            )
             out = paddle.static.nn.conv3d_transpose(
                 input=img, num_filters=12, filter_size=12, use_cudnn=True
             )
@@ -976,7 +967,9 @@ class TestLayer(LayerTest):
                 feed={'pixel': input_array}, fetch_list=[out]
             )[0]
         with self.static_graph():
-            img = layers.data(name='pixel', shape=[3, 2, 2, 2], dtype='float32')
+            img = paddle.static.data(
+                name='pixel', shape=[-1, 3, 2, 2, 2], dtype='float32'
+            )
             conv3d_transpose = paddle.nn.Conv3DTranspose(
                 in_channels=3, out_channels=12, kernel_size=12
             )
@@ -1081,8 +1074,8 @@ class TestLayer(LayerTest):
         value_b = np.arange(3)
         # less than
         with self.static_graph():
-            a = layers.data(name='a', shape=[1], dtype='int64')
-            b = layers.data(name='b', shape=[1], dtype='int64')
+            a = paddle.static.data(name='a', shape=[-1, 1], dtype='int64')
+            b = paddle.static.data(name='b', shape=[-1, 1], dtype='int64')
             cond = paddle.less_than(x=a, y=b)
             static_ret = self.get_static_graph_result(
                 feed={"a": value_a, "b": value_b}, fetch_list=[cond]
@@ -1097,8 +1090,8 @@ class TestLayer(LayerTest):
         # less equal
         with self.static_graph():
-            a1 = layers.data(name='a1', shape=[1], dtype='int64')
-            b1 = layers.data(name='b1', shape=[1], dtype='int64')
+            a1 = paddle.static.data(name='a1', shape=[-1, 1], dtype='int64')
+            b1 = paddle.static.data(name='b1', shape=[-1, 1], dtype='int64')
             cond1 = paddle.less_equal(x=a1, y=b1)
             static_ret1 = self.get_static_graph_result(
                 feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1]
@@ -1113,8 +1106,8 @@ class TestLayer(LayerTest):
         # greater than
         with self.static_graph():
-            a2 = layers.data(name='a2', shape=[1], dtype='int64')
-            b2 = layers.data(name='b2', shape=[1], dtype='int64')
+            a2 = paddle.static.data(name='a2', shape=[-1, 1], dtype='int64')
+            b2 = paddle.static.data(name='b2', shape=[-1, 1], dtype='int64')
             cond2 = paddle.greater_than(x=a2, y=b2)
             static_ret2 = self.get_static_graph_result(
                 feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2]
@@ -1129,8 +1122,8 @@ class TestLayer(LayerTest):
         # greater equal
         with self.static_graph():
-            a3 = layers.data(name='a3', shape=[1], dtype='int64')
-            b3 = layers.data(name='b3', shape=[1], dtype='int64')
+            a3 = paddle.static.data(name='a3', shape=[-1, 1], dtype='int64')
+            b3 = paddle.static.data(name='b3', shape=[-1, 1], dtype='int64')
             cond3 = paddle.greater_equal(x=a3, y=b3)
             static_ret3 = self.get_static_graph_result(
                 feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3]
@@ -1145,8 +1138,8 @@ class TestLayer(LayerTest):
         # equal
         with self.static_graph():
-            a4 = layers.data(name='a4', shape=[1], dtype='int64')
-            b4 = layers.data(name='b4', shape=[1], dtype='int64')
+            a4 = paddle.static.data(name='a4', shape=[-1, 1], dtype='int64')
+            b4 = paddle.static.data(name='b4', shape=[-1, 1], dtype='int64')
             cond4 = paddle.equal(x=a4, y=b4)
             static_ret4 = self.get_static_graph_result(
                 feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4]
@@ -1161,8 +1154,8 @@ class TestLayer(LayerTest):
         # not equal
         with self.static_graph():
-            a5 = layers.data(name='a5', shape=[1], dtype='int64')
-            b5 = layers.data(name='b5', shape=[1], dtype='int64')
+            a5 = paddle.static.data(name='a5', shape=[-1, 1], dtype='int64')
+            b5 = paddle.static.data(name='b5', shape=[-1, 1], dtype='int64')
             cond5 = paddle.equal(x=a5, y=b5)
             static_ret5 = self.get_static_graph_result(
                 feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5]
@@ -1349,22 +1342,20 @@ class TestLayer(LayerTest):
 
     def test_crop_tensor(self):
         with self.static_graph():
-            x = fluid.layers.data(name="x1", shape=[6, 5, 8])
-
-            dim1 = fluid.layers.data(
-                name="dim1", shape=[1], append_batch_size=False
-            )
-            dim2 = fluid.layers.data(
-                name="dim2", shape=[1], append_batch_size=False
+            x = paddle.static.data(
+                name="x1", shape=[-1, 6, 5, 8], dtype="float32"
             )
+
+            dim1 = paddle.static.data(name="dim1", shape=[1], dtype="float32")
+            dim2 = paddle.static.data(name="dim2", shape=[1], dtype="float32")
             crop_shape1 = (1, 2, 4, 4)
-            crop_shape2 = fluid.layers.data(
-                name="crop_shape", shape=[4], append_batch_size=False
+            crop_shape2 = paddle.static.data(
+                name="crop_shape", shape=[4], dtype="float32"
             )
             crop_shape3 = [-1, dim1, dim2, 4]
             crop_offsets1 = [0, 0, 1, 0]
-            crop_offsets2 = fluid.layers.data(
-                name="crop_offset", shape=[4], append_batch_size=False
+            crop_offsets2 = paddle.static.data(
+                name="crop_offset", shape=[4], dtype="float32"
            )
             crop_offsets3 = [0, dim1, dim2, 0]
@@ -1378,7 +1369,9 @@ class TestLayer(LayerTest):
 
     def test_shard_index(self):
         with self.static_graph():
-            x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
+            x = paddle.static.data(
+                name="label", shape=[-1, 4, 1], dtype='int64'
+            )
             shard_label = paddle.shard_index(
                 input=x, index_num=20, nshards=2, shard_id=0
             )
@@ -1517,12 +1510,15 @@ class TestBook(LayerTest):
             self._feed_dict[name] = self._get_np_data(
                 shape, dtype, append_batch_size
             )
-            return layers.data(
+            if append_batch_size:
+                shape = [-1] + shape
+            data = paddle.static.data(
                 name=name,
                 shape=shape,
                 dtype=dtype,
-                append_batch_size=append_batch_size,
             )
+            data.desc.set_need_check_feed(False)
+            return data
 
     def make_fit_a_line(self):
         with program_guard(
@@ -1803,8 +1799,8 @@ class TestBook(LayerTest):
             updates = self._get_data(
                 name='updates',
                 shape=[2, 3],
-                append_batch_size=False,
                 dtype='float32',
+                append_batch_size=False,
             )
             out = paddle.scatter(x, index=idx, updates=updates)
             return out
@@ -2065,11 +2061,17 @@ class TestBook(LayerTest):
 
     def test_affine_grid(self):
         with self.static_graph():
-            data = layers.data(name='data', shape=[2, 3, 3], dtype="float32")
+            data = paddle.static.data(
+                name='data', shape=[-1, 2, 3, 3], dtype="float32"
+            )
             out = paddle.argsort(x=data, axis=1)
-            theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
-            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
+            theta = paddle.static.data(
+                name="theta", shape=[-1, 2, 3], dtype="float32"
+            )
+            out_shape = paddle.static.data(
+                name="out_shape", shape=[-1], dtype="int32"
+            )
             data_0 = paddle.nn.functional.affine_grid(theta, out_shape)
             data_1 = paddle.nn.functional.affine_grid(theta, [5, 3, 28, 28])
@@ -2082,7 +2084,9 @@ class TestBook(LayerTest):
         ends = [3, 3, 4]
         strides = [1, 1, 1]
         with self.static_graph():
-            x = layers.data(name="x", shape=[245, 30, 30], dtype="float32")
+            x = paddle.static.data(
+                name="x", shape=[-1, 245, 30, 30], dtype="float32"
+            )
             out = paddle.strided_slice(
                 x, axes=axes, starts=starts, ends=ends, strides=strides
             )
@@ -2101,31 +2105,38 @@ class TestBook(LayerTest):
     def test_sequence_expand(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name='x', shape=[10], dtype='float32')
-            y = layers.data(
-                name='y', shape=[10, 20], dtype='float32', lod_level=2
+            x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32')
+            y = paddle.static.data(
+                name='y', shape=[-1, 10, 20], dtype='float32', lod_level=2
             )
             return layers.sequence_expand(x=x, y=y, ref_level=1)
 
     def test_sequence_reshape(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
+            x = paddle.static.data(
+                name='x', shape=[-1, 8], dtype='float32', lod_level=1
+            )
             out = layers.sequence_reshape(input=x, new_dim=16)
             return out
 
     def test_sequence_unpad(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name='x', shape=[10, 5], dtype='float32')
-            length = layers.data(name='length', shape=[], dtype='int64')
+            x = paddle.static.data(name='x', shape=[-1, 10, 5], dtype='float32')
+            length = paddle.static.data(
+                name='length', shape=[-1], dtype='int64'
+            )
             return layers.sequence_unpad(x=x, length=length)
 
     def test_sequence_softmax(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            seq_data = layers.data(
-                name='seq_data', shape=[10, 10], dtype='float32', lod_level=1
+            seq_data = paddle.static.data(
+                name='seq_data',
+                shape=[-1, 10, 10],
+                dtype='float32',
+                lod_level=1,
             )
             seq = paddle.static.nn.fc(x=seq_data, size=20)
             return layers.sequence_softmax(seq)
@@ -2133,27 +2144,23 @@ class TestBook(LayerTest):
     def test_sequence_unsqueeze(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name='x', shape=[8, 2], dtype='float32')
+            x = paddle.static.data(name='x', shape=[-1, 8, 2], dtype='float32')
             out = paddle.unsqueeze(x, axis=[1])
             return out
 
     def test_sequence_scatter(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(
-                name='x', shape=[3, 6], append_batch_size=False, dtype='float32'
-            )
-            idx = layers.data(
+            x = paddle.static.data(name='x', shape=[3, 6], dtype='float32')
+            idx = paddle.static.data(
                 name='idx',
                 shape=[12, 1],
-                append_batch_size=False,
                 dtype='int32',
                 lod_level=1,
             )
-            updates = layers.data(
+            updates = paddle.static.data(
                 name='updates',
                 shape=[12, 1],
-                append_batch_size=False,
                 dtype='float32',
                 lod_level=1,
             )
@@ -2165,8 +2172,8 @@ class TestBook(LayerTest):
         with self.static_graph():
             import numpy as np
 
-            seqs = layers.data(
-                name='x', shape=[10, 5], dtype='float32', lod_level=1
+            seqs = paddle.static.data(
+                name='x', shape=[-1, 10, 5], dtype='float32', lod_level=1
             )
             offset = layers.assign(input=np.array([[0, 1]]).astype('int32'))
             length = layers.assign(input=np.array([[2, 1]]).astype('int32'))
@@ -2178,8 +2185,8 @@ class TestBook(LayerTest):
     def test_shuffle_batch(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(
-                name='X', shape=[4, 50], dtype='float32', lod_level=0
+            x = paddle.static.data(
+                name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
             )
             out1 = fluid.contrib.layers.shuffle_batch(x)
             default_main_program().random_seed = 1000
@@ -2240,21 +2247,25 @@ class TestBook(LayerTest):
     def test_sequence_enumerate(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
+            x = paddle.static.data(
+                name="input", shape=[-1, 1], dtype='int32', lod_level=1
+            )
             out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
 
     def test_row_conv(self):
         # TODO(minqiyang): dygraph do not support lod now
         with self.static_graph():
-            x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
+            x = paddle.static.data(
+                name='x', shape=[-1, 16], dtype='float32', lod_level=1
+            )
             out = paddle.static.nn.row_conv(input=x, future_context_size=2)
             return out
 
     def test_simple_conv2d(self):
         # TODO(minqiyang): dygraph do not support layers with param now
         with self.static_graph():
-            images = layers.data(
-                name='pixel', shape=[3, 48, 48], dtype='float32'
+            images = paddle.static.data(
+                name='pixel', shape=[-1, 3, 48, 48], dtype='float32'
             )
             return paddle.static.nn.conv2d(
                 input=images, num_filters=3, filter_size=[4, 4]
@@ -2263,16 +2274,17 @@ class TestBook(LayerTest):
     def test_squeeze(self):
         # TODO(minqiyang): dygraph do not support layers with param now
         with self.static_graph():
-            x = layers.data(name='x', shape=[1, 1, 4], dtype='float32')
+            x = paddle.static.data(
+                name='x', shape=[-1, 1, 1, 4], dtype='float32'
+            )
             out = paddle.squeeze(x, axis=[2])
             return out
 
     def test_flatten(self):
         # TODO(minqiyang): dygraph do not support op without kernel now
        with self.static_graph():
-            x = layers.data(
+            x = paddle.static.data(
                 name='x',
-                append_batch_size=False,
                 shape=[4, 4, 3],
                 dtype="float32",
             )
@@ -2288,7 +2300,9 @@ class TestBook(LayerTest):
 
     def test_unfold(self):
         with self.static_graph():
-            x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
+            x = paddle.static.data(
+                name='x', shape=[-1, 3, 20, 20], dtype='float32'
+            )
             out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
             return out
 
@@ -2308,18 +2322,13 @@ class TestBook(LayerTest):
         with program_guard(
             fluid.default_main_program(), fluid.default_startup_program()
         ):
-            input = layers.data(
+            input = paddle.static.data(
                 name='input_data',
                 shape=[3, 3],
-                append_batch_size=False,
                 dtype='float32',
             )
-            x = layers.data(
-                name='x', shape=[3, 2], append_batch_size=False, dtype='float32'
-            )
-            y = layers.data(
-                name='y', shape=[2, 3], append_batch_size=False, dtype='float32'
-            )
+            x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
+            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')
             out = paddle.addmm(input=input, x=x, y=y)
             return out
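Note: the bulk of this patch is the mechanical substitution seen above. fluid.layers.data prepended a batch dimension by default (append_batch_size=True), whereas paddle.static.data takes the shape literally, so the batch dimension is now written out as -1. A minimal sketch of the equivalence, assuming a static-graph session (names and sizes here are illustrative, not taken from the patch):

    import paddle

    paddle.enable_static()
    # Old style (implicitly became shape [-1, 13]):
    #     x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    # New style, batch dimension spelled out:
    x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
    # Old calls that passed append_batch_size=False keep their shape as-is:
    w = paddle.static.data(name='w', shape=[3, 2], dtype='float32')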
diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py
index 1ed1a713e3..0c1327c5af 100644
--- a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py
@@ -31,9 +31,9 @@ paddle.enable_static()
 
 def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id):
     remove_ps_flag(os.getpid())
-    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32')
     y_predict = paddle.static.nn.fc(x, size=1, activation=None)
-    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+    y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
 
     # loss function
     cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
@@ -68,9 +68,9 @@ def run_pserver_with_empty_block(
     use_cuda, sync_mode, ip, port, trainers, trainer_id
 ):
     remove_ps_flag(os.getpid())
-    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
+    x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32')
     y_predict = paddle.static.nn.fc(x, size=1, bias_attr=False)
-    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+    y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
 
     # loss function
     cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
diff --git a/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py b/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py
index 3b2465a344..853b364f35 100644
--- a/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py
+++ b/python/paddle/fluid/tests/unittests/test_load_vars_shape_check.py
@@ -30,7 +30,9 @@ class TestLoadVarsShapeCheck(unittest.TestCase):
         startup_program_1 = fluid.Program()
 
         with fluid.program_guard(program_1, startup_program_1):
-            input = fluid.layers.data(name="x", shape=[-1, 10], dtype='float32')
+            input = paddle.static.data(
+                name="x", shape=[-1, 10], dtype='float32'
+            )
             out = paddle.static.nn.fc(input, 20)
             place = fluid.CPUPlace()
             exe = Executor(place)
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py
index ec9eee17c0..cc11e96f5a 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py
@@ -16,6 +16,7 @@ import unittest
 
 import numpy as np
 
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle import enable_static
@@ -233,7 +234,9 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
         self.set_initializer()
 
         with fluid.program_guard(self.prog, self.startup_prog):
-            x = fluid.layers.data(name='x', shape=self.ids_shape, dtype='int64')
+            x = paddle.static.data(
+                name='x', shape=self.ids_shape, dtype='int64'
+            )
             self.emb = fluid.layers.embedding(
                 input=x,
                 size=self.w_shape,
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py
index c022f56480..8cbc6242b3 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_bf16_op.py
@@ -102,7 +102,9 @@ class TestEmbeddingLayerBF16ConstantInitializer(unittest.TestCase):
         self.set_initializer()
 
         with fluid.program_guard(self.prog, self.startup_prog):
-            x = fluid.layers.data(name='x', shape=self.ids_shape, dtype='int64')
+            x = paddle.static.data(
+                name='x', shape=[-1] + self.ids_shape, dtype='int64'
+            )
             self.emb = fluid.input.embedding(
                 input=x,
                 size=self.w_shape,
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
index cea6858e0d..eb9c4c6089 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -200,8 +200,8 @@ class TestLookupTableIsSparse(unittest.TestCase):
         self.init_data()
         main_program = fluid.Program()
         with fluid.program_guard(main_program, fluid.Program()):
-            x = fluid.layers.data(name='x', shape=[5], dtype='int64')
-            y_ = fluid.layers.data(name='y_', shape=[5], dtype='float32')
+            x = paddle.static.data(name='x', shape=[-1, 5], dtype='int64')
+            y_ = paddle.static.data(name='y_', shape=[-1, 5], dtype='float32')
             emb = fluid.input.embedding(
                 input=x,
                 size=[10, 16],
@@ -246,7 +246,7 @@ class TestLookupTableIsSparse(unittest.TestCase):
 
 class TestLookupTableApi(unittest.TestCase):
     def test_api(self):
-        x = fluid.layers.data(name='x', shape=[20], dtype='int64')
+        x = paddle.static.data(name='x', shape=[-1, 20], dtype='int64')
         emb = fluid.embedding(input=x, size=[128, 64])
 
         place = fluid.CPUPlace()
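Note: where the old shape was held in a Python list rather than a literal (self.ids_shape above; image_shape, img_shape and dim in later files), the patch prepends the batch dimension by list concatenation. A rough sketch under the same assumptions (the ids_shape value is hypothetical):

    import paddle

    paddle.enable_static()
    ids_shape = [4, 1]  # hypothetical value, e.g. set in a test's setUp()
    # append_batch_size=True used to turn [4, 1] into [-1, 4, 1];
    # the concatenation below rebuilds that shape explicitly.
    x = paddle.static.data(name='ids', shape=[-1] + ids_shape, dtype='int64')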
paddle.static.data(name="a", shape=[-1, 1]) b = 10 - a place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -84,7 +84,7 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_mul_scalar(self): - a = fluid.layers.data(name="a", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) b = a * 10 place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -96,7 +96,7 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_rmul_scalar(self): - a = fluid.layers.data(name="a", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 * a place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -108,7 +108,7 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_div_scalar(self): - a = fluid.layers.data(name="a", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) b = a / 10 place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -120,7 +120,7 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_rdiv_scalar(self): - a = fluid.layers.data(name="a", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 / a place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -133,8 +133,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_div_two_tensor(self): - a = fluid.layers.data(name="a", shape=[1]) - b = fluid.layers.data(name="b", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) + b = paddle.static.data(name="b", shape=[-1, 1]) c = a / b place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -149,8 +149,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_mul_two_tensor(self): - a = fluid.layers.data(name="a", shape=[1]) - b = fluid.layers.data(name="b", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) + b = paddle.static.data(name="b", shape=[-1, 1]) c = a * b place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -165,8 +165,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_add_two_tensor(self): - a = fluid.layers.data(name="a", shape=[1]) - b = fluid.layers.data(name="b", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) + b = paddle.static.data(name="b", shape=[-1, 1]) c = a + b place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -181,8 +181,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_sub_two_tensor(self): - a = fluid.layers.data(name="a", shape=[1]) - b = fluid.layers.data(name="b", shape=[1]) + a = paddle.static.data(name="a", shape=[-1, 1]) + b = paddle.static.data(name="b", shape=[-1, 1]) c = a - b place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -197,7 +197,7 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_integer_div(self): - a = fluid.layers.data(name="a", shape=[1], dtype='int64') + a = paddle.static.data(name="a", shape=[-1, 1], dtype='int64') b = a / 7 place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -211,8 +211,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_equal(self): - a = fluid.layers.data(name="a", shape=[1], dtype='float32') - b = fluid.layers.data(name="b", shape=[1], dtype='float32') + a = paddle.static.data(name="a", shape=[-1, 1], dtype='float32') + b = paddle.static.data(name="b", shape=[-1, 1], dtype='float32') c = a == b place = fluid.CPUPlace() @@ -231,9 +231,10 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_equal_and_cond(self): - a = fluid.layers.data(name="a", shape=[1], dtype='float32') - b = fluid.layers.data(name="b", shape=[1], dtype='float32') - + a = 
paddle.static.data(name="a", shape=[-1, 1], dtype='float32') + a.desc.set_need_check_feed(False) + b = paddle.static.data(name="b", shape=[-1, 1], dtype='float32') + b.desc.set_need_check_feed(False) one = paddle.ones(shape=[1], dtype='int32') zero = fluid.layers.zeros(shape=[1], dtype='int32') cond = one == zero @@ -241,8 +242,8 @@ class TestMathOpPatches(unittest.TestCase): place = fluid.CPUPlace() exe = fluid.Executor(place) - a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float') - b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float') + a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32') + b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32') (c_np,) = exe.run( fluid.default_main_program(), feed={"a": a_np, "b": b_np}, @@ -253,7 +254,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_neg(self): - a = fluid.layers.data(name="a", shape=[10, 1]) + a = paddle.static.data(name="a", shape=[-1, 10, 1], dtype='float32') + a.desc.set_need_check_feed(False) b = -a place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -266,7 +268,8 @@ class TestMathOpPatches(unittest.TestCase): @prog_scope() def test_astype(self): - a = fluid.layers.data(name="a", shape=[10, 1]) + a = paddle.static.data(name="a", shape=[-1, 10, 1]) + a.desc.set_need_check_feed(False) b = a.astype('float32') place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index c6ade1ca53..3fcd423aa5 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -23,7 +23,6 @@ from test_sum_op import TestReduceOPTensorAxisBase import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard np.random.seed(10) @@ -82,12 +81,12 @@ class TestMeanOpError(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.mean, input1) # The input dtype of mean_op must be float16, float32, float64. 
diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py
index c6ade1ca53..3fcd423aa5 100644
--- a/python/paddle/fluid/tests/unittests/test_mean_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mean_op.py
@@ -23,7 +23,6 @@ from test_sum_op import TestReduceOPTensorAxisBase
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
 
 np.random.seed(10)
@@ -82,12 +81,12 @@ class TestMeanOpError(unittest.TestCase):
             input1 = 12
             self.assertRaises(TypeError, paddle.mean, input1)
             # The input dtype of mean_op must be float16, float32, float64.
-            input2 = fluid.layers.data(
-                name='input2', shape=[12, 10], dtype="int32"
+            input2 = paddle.static.data(
+                name='input2', shape=[-1, 12, 10], dtype="int32"
             )
             self.assertRaises(TypeError, paddle.mean, input2)
-            input3 = fluid.layers.data(
-                name='input3', shape=[4], dtype="float16"
+            input3 = paddle.static.data(
+                name='input3', shape=[-1, 4], dtype="float16"
             )
             paddle.nn.functional.softmax(input3)
@@ -442,7 +441,7 @@ class TestMeanDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float32
 
-        data = layers.data('data', [3, 4, 5], False, dtype)
+        data = paddle.static.data('data', [3, 4, 5], dtype)
         data.persistable = True
         out = paddle.mean(data)
         data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
@@ -473,7 +472,7 @@ class TestMeanTripleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float32
 
-        data = layers.data('data', [3, 4, 5], False, dtype)
+        data = paddle.static.data('data', [3, 4, 5], dtype)
         data.persistable = True
         out = paddle.mean(data)
         data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py
index bdd6b3d304..32ce652ace 100644
--- a/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py
+++ b/python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py
@@ -27,8 +27,8 @@ class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
         self.iteration = 10
 
     def main_impl(self, place):
-        image = fluid.layers.data(
-            name='image', shape=self.image_shape, dtype='float32'
+        image = paddle.static.data(
+            name='image', shape=[-1] + self.image_shape, dtype='float32'
         )
         relu_image = F.relu(image)
         loss = paddle.mean(relu_image)
diff --git a/python/paddle/fluid/tests/unittests/test_memory_usage.py b/python/paddle/fluid/tests/unittests/test_memory_usage.py
index b083bc6d30..a9b2d2a92b 100644
--- a/python/paddle/fluid/tests/unittests/test_memory_usage.py
+++ b/python/paddle/fluid/tests/unittests/test_memory_usage.py
@@ -26,9 +26,9 @@ def train_simulator(test_batch_size=10):
             "but got batch_size={}".format(test_batch_size)
         )
 
-    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+    x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
     y_predict = paddle.static.nn.fc(x, size=1, activation=None)
-    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+    y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
 
     cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
     avg_cost = paddle.mean(cost)
diff --git a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py
index 20bf8e0985..6a1c6af11f 100644
--- a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py
+++ b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py
@@ -36,8 +36,10 @@ def loss_net(hidden, label):
 
 
 def conv_net(use_feed):
-    img = fluid.layers.data(name='image', shape=img_shape, dtype='float16')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    img = paddle.static.data(
+        name='image', shape=[-1] + img_shape, dtype='float16'
+    )
+    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
 
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
         input=img,
diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py
index 4b745cb7a6..6ec02735d3 100644
--- a/python/paddle/fluid/tests/unittests/test_momentum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py
@@ -528,8 +528,8 @@ class TestMomentumV2(unittest.TestCase):
         place = fluid.CPUPlace()
         main = fluid.Program()
         with fluid.program_guard(main):
-            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
-            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+            x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
+            y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
             y_predict = paddle.static.nn.fc(x, size=1, activation=None)
             cost = paddle.nn.functional.square_error_cost(
                 input=y_predict, label=y
@@ -666,8 +666,8 @@ class TestMomentumOpWithDecayAPI(unittest.TestCase):
         place = fluid.CPUPlace()
         main = fluid.Program()
         with fluid.program_guard(main):
-            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
-            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+            x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
+            y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
             y_predict = paddle.static.nn.fc(x, size=1, activation=None)
             cost = paddle.nn.functional.square_error_cost(
                 input=y_predict, label=y
diff --git a/python/paddle/fluid/tests/unittests/test_monitor.py b/python/paddle/fluid/tests/unittests/test_monitor.py
index 205414668f..a5d5e30176 100644
--- a/python/paddle/fluid/tests/unittests/test_monitor.py
+++ b/python/paddle/fluid/tests/unittests/test_monitor.py
@@ -54,8 +54,8 @@ class TestDatasetWithStat(unittest.TestCase):
         slots = ["slot1", "slot2", "slot3", "slot4"]
         slots_vars = []
         for slot in slots:
-            var = fluid.layers.data(
-                name=slot, shape=[1], dtype="int64", lod_level=1
+            var = paddle.static.data(
+                name=slot, shape=[-1, 1], dtype="int64", lod_level=1
             )
             slots_vars.append(var)
 
diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index 99fbe01295..75dafb1ea3 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -79,12 +79,14 @@ class TestNNMseLoss(unittest.TestCase):
                 else fluid.CPUPlace()
             )
             with fluid.program_guard(prog, startup_prog):
-                input = fluid.layers.data(
-                    name='input', shape=dim, dtype='float32'
+                input = paddle.static.data(
+                    name='input', shape=[-1] + dim, dtype='float32'
                 )
+                input.desc.set_need_check_feed(False)
-                label = fluid.layers.data(
-                    name='label', shape=dim, dtype='float32'
+                label = paddle.static.data(
+                    name='label', shape=[-1] + dim, dtype='float32'
                 )
+                label.desc.set_need_check_feed(False)
                 mse_loss = paddle.nn.loss.MSELoss()
                 ret = mse_loss(input, label)
@@ -123,12 +125,14 @@ class TestNNMseLoss(unittest.TestCase):
                 else fluid.CPUPlace()
             )
             with fluid.program_guard(prog, startup_prog):
-                input = fluid.layers.data(
-                    name='input', shape=dim, dtype='float32'
+                input = paddle.static.data(
+                    name='input', shape=[-1] + dim, dtype='float32'
                 )
+                input.desc.set_need_check_feed(False)
-                label = fluid.layers.data(
-                    name='label', shape=dim, dtype='float32'
+                label = paddle.static.data(
+                    name='label', shape=[-1] + dim, dtype='float32'
                 )
+                label.desc.set_need_check_feed(False)
                 mse_loss = paddle.nn.loss.MSELoss(reduction='sum')
                 ret = mse_loss(input, label)
@@ -167,12 +171,14 @@ class TestNNMseLoss(unittest.TestCase):
                 else fluid.CPUPlace()
             )
             with fluid.program_guard(prog, startup_prog):
-                input = fluid.layers.data(
-                    name='input', shape=dim, dtype='float32'
+                input = paddle.static.data(
+                    name='input', shape=[-1] + dim, dtype='float32'
                 )
+                input.desc.set_need_check_feed(False)
-                label = fluid.layers.data(
-                    name='label', shape=dim, dtype='float32'
+                label = paddle.static.data(
+                    name='label', shape=[-1] + dim, dtype='float32'
                 )
+                label.desc.set_need_check_feed(False)
                 mse_loss = paddle.nn.loss.MSELoss(reduction='none')
                 ret = mse_loss(input, label)
diff --git a/python/paddle/fluid/tests/unittests/test_multihead_attention.py b/python/paddle/fluid/tests/unittests/test_multihead_attention.py
index 9d94a7713b..2db080edbf 100644
--- a/python/paddle/fluid/tests/unittests/test_multihead_attention.py
+++ b/python/paddle/fluid/tests/unittests/test_multihead_attention.py
@@ -31,18 +31,16 @@ class TestMultiheadAttention(unittest.TestCase):
 
     def set_program(self):
         """Build the test program."""
-        queries = fluid.layers.data(
+        queries = paddle.static.data(
             name="queries",
             shape=self.input_shape,
             dtype="float32",
-            append_batch_size=False,
         )
         queries.stop_gradient = False
-        keys = fluid.layers.data(
+        keys = paddle.static.data(
             name="keys",
             shape=self.input_shape,
             dtype="float32",
-            append_batch_size=False,
         )
         keys.stop_gradient = False
diff --git a/python/paddle/fluid/tests/unittests/test_name_scope.py b/python/paddle/fluid/tests/unittests/test_name_scope.py
index 372547722c..daecd21ab7 100644
--- a/python/paddle/fluid/tests/unittests/test_name_scope.py
+++ b/python/paddle/fluid/tests/unittests/test_name_scope.py
@@ -21,7 +21,7 @@ import paddle.fluid as fluid
 class TestNameScope(unittest.TestCase):
     def test_name_scope(self):
         with fluid.name_scope("s1"):
-            a = fluid.layers.data(name='data', shape=[1], dtype='int32')
+            a = paddle.static.data(name='data', shape=[-1, 1], dtype='int32')
             b = a + 1
             with fluid.name_scope("s2"):
                 c = b * 1
diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py
index ee51b0d608..e2923da711 100644
--- a/python/paddle/fluid/tests/unittests/test_nce.py
+++ b/python/paddle/fluid/tests/unittests/test_nce.py
@@ -187,8 +187,10 @@ class TestNCECase1SelectedRows(unittest.TestCase):
         custom_dist,
         is_sparse,
     ):
-        input = fluid.layers.data(name="input", shape=[10], dtype="float32")
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        input = paddle.static.data(
+            name="input", shape=[-1, 10], dtype="float32"
+        )
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
 
         w_param = (
             fluid.default_main_program()
@@ -288,7 +290,7 @@ class TestNCE_OpError(unittest.TestCase):
             input1 = fluid.create_lod_tensor(
                 np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace()
             )
-            label1 = fluid.layers.data(
+            label1 = paddle.static.data(
                 name='label1', shape=[-1, 4], dtype="int64"
             )
             # the input(input) of nce layer must be Variable.
@@ -296,7 +298,7 @@ class TestNCE_OpError(unittest.TestCase):
                 TypeError, paddle.static.nn.nce, input1, label1, 5
             )
 
-            input2 = fluid.layers.data(
+            input2 = paddle.static.data(
                 name='input2', shape=[-1, 4], dtype="float32"
             )
             label2 = fluid.create_lod_tensor(
@@ -307,10 +309,10 @@ class TestNCE_OpError(unittest.TestCase):
                 TypeError, paddle.static.nn.nce, input2, label2, 5
             )
 
-            input3 = fluid.layers.data(
+            input3 = paddle.static.data(
                 name='input3', shape=[-1, 4], dtype="float16"
             )
-            label3 = fluid.layers.data(
+            label3 = paddle.static.data(
                 name='label3', shape=[-1, 1], dtype="int64"
             )
             # the data type of input(input) must be float32 or float64.
@@ -318,10 +320,10 @@ class TestNCE_OpError(unittest.TestCase):
                 TypeError, paddle.static.nn.nce, input3, label3, 5
             )
 
-            input4 = fluid.layers.data(
+            input4 = paddle.static.data(
                 name='input4', shape=[-1, 4], dtype="float32"
             )
-            label4 = fluid.layers.data(
+            label4 = paddle.static.data(
                 name='label4', shape=[-1, 1], dtype="int32"
             )
             # the data type of input(label) must be int64.
diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
index cdde0c1a46..19aa2b7219 100644
--- a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
+++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
@@ -30,8 +30,8 @@ class TestNetWithDtype(unittest.TestCase):
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
-            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
-            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            x = paddle.static.data(name='x', shape=[-1, 13], dtype=self.dtype)
+            y = paddle.static.data(name='y', shape=[-1, 1], dtype=self.dtype)
             y_predict = paddle.static.nn.fc(x, size=1, activation=None)
             cost = paddle.nn.functional.square_error_cost(
                 input=y_predict, label=y
diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py
index 1051ef8d4b..d89af631ba 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_functional_embedding_static.py
@@ -16,6 +16,7 @@ import unittest
 
 import numpy as np
 
+import paddle
 import paddle.fluid as fluid
 import paddle.nn.functional as functional
 
@@ -41,10 +42,9 @@ class EmbeddingStatic(unittest.TestCase):
                 (128, 100), attr=param_attr, dtype="float32"
             )
 
-            label = fluid.layers.data(
+            label = paddle.static.data(
                 name="label",
-                shape=[4],
-                append_batch_size=False,
+                shape=[-1, 4],
                 dtype="int64",
             )
 
@@ -74,10 +74,9 @@ class EmbeddingStatic(unittest.TestCase):
                 (128, 100), attr=param_attr, dtype="float32"
            )
 
-            label = fluid.layers.data(
+            label = paddle.static.data(
                 name="label",
-                shape=[4],
-                append_batch_size=False,
+                shape=[-1, 4],
                 dtype="int32",
             )
 
diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py
index 0faf226fac..0043f15aed 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py
@@ -134,9 +134,13 @@ class TestOneHotOp_exception(unittest.TestCase):
     def test_check_output(self):
         program = Program()
         with program_guard(program):
-            x = fluid.layers.data(
-                name='x', shape=[self.dimension], dtype='float32', lod_level=1
+            x = paddle.static.data(
+                name='x',
+                shape=[-1, self.dimension],
+                dtype='float32',
+                lod_level=1,
             )
+            x.desc.set_need_check_feed(False)
             block = program.current_block()
             one_hot_out = block.create_var(
                 name="one_hot_out",
@@ -181,7 +185,8 @@ class TestOneHotOpApi(unittest.TestCase):
         )
 
     def _run(self, num_classes):
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
+        label.desc.set_need_check_feed(False)
         one_hot_label = functional.one_hot(x=label, num_classes=num_classes)
 
         place = fluid.CPUPlace()
@@ -205,12 +210,12 @@ class BadInputTestOnehotV2(unittest.TestCase):
         with fluid.program_guard(fluid.Program()):
 
             def test_bad_x():
-                label = fluid.layers.data(
+                label = paddle.static.data(
                     name="label",
                     shape=[4],
-                    append_batch_size=False,
                     dtype="float32",
                 )
+                label.desc.set_need_check_feed(False)
                 one_hot_label = functional.one_hot(x=label, num_classes=4)
 
             self.assertRaises(TypeError, test_bad_x)
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index ec355baefa..b86d3029dd 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -21,7 +21,6 @@ from decorator_helper import prog_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers
 
 paddle.enable_static()
 
@@ -73,7 +72,7 @@ class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase):
         eps = 0.05
         dtype = np.float64
 
-        x = layers.data('x', shape, False, dtype)
+        x = paddle.static.data('x', shape, dtype)
         x.persistable = True
         y = paddle.mean(x, axis=0)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
@@ -97,7 +96,7 @@ class TestReduceSumWithDimDoubleGradCheck(unittest.TestCase):
         eps = 0.05
         dtype = np.float64
 
-        x = layers.data('x', shape, False, dtype)
+        x = paddle.static.data('x', shape, dtype)
         x.persistable = True
         y = paddle.sum(x, axis=0)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
@@ -122,7 +121,7 @@ class TestReshapeDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.reshape(x, new_shape)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -150,7 +149,7 @@ class TestTileDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.tile(x, repeat_times)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -181,7 +180,7 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.expand(x, new_shape)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -213,7 +212,7 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.squeeze(x, axes)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -245,7 +244,7 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.unsqueeze(x, axes)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -274,7 +273,7 @@ class TestClipDoubleGradCheck(unittest.TestCase):
         x_shape = [2, 4, 10]
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.clip(x, min=-1.0, max=1.0)
         x_arr = np.random.uniform(-5.0, 5.0, x_shape).astype(dtype)
@@ -299,7 +298,7 @@ class TestTransposeDoubleGradCheck(unittest.TestCase):
         perm = [1, 0]
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.transpose(x, perm)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -321,7 +320,7 @@ class TestTransposeDoubleGradCheckCase1(unittest.TestCase):
         perm = [0, 2, 3, 1]
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.transpose(x, perm)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -348,7 +347,7 @@ class TestConstantPadDoubleGradCheck(unittest.TestCase):
         eps = 0.005
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.nn.functional.pad(x, pad)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -375,7 +374,7 @@ class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck):
         pad = [1, 0, 1, 0, 1, 0, 1, 0]
         dtype = np.float64
 
-        x = layers.data('x', x_shape, False, dtype)
+        x = paddle.static.data('x', x_shape, dtype)
         x.persistable = True
         out = paddle.nn.functional.pad(x, pad)
         x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
@@ -393,8 +392,8 @@ class TestConcatDoubleGradCheck(unittest.TestCase):
         pad = [1, 1, 1, 1]
         dtype = np.float64
 
-        x1 = layers.data('x', x_shape, False, dtype)
-        x2 = layers.data('x', x_shape, False, dtype)
+        x1 = paddle.static.data('x', x_shape, dtype)
+        x2 = paddle.static.data('x', x_shape, dtype)
         x1.persistable = True
         x2.persistable = True
         out = paddle.concat([x1, x2], axis=0)
@@ -423,10 +422,9 @@ class TestConcatDoubleGradCheck(unittest.TestCase):
 class TestAvgPool2DDoubleGradCheckCase1(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-        input_NCHW = fluid.layers.data(
+        input_NCHW = paddle.static.data(
             name="input_NCHW",
             shape=[2, 3, 5, 5],
-            append_batch_size=False,
             dtype="float32",
         )
 
@@ -454,10 +452,9 @@ class TestAvgPool2DDoubleGradCheckCase2(unittest.TestCase):
 
     @prog_scope()
     def func(self, place):
-        input_NHWC = fluid.layers.data(
+        input_NHWC = paddle.static.data(
             name="input_NHWC",
             shape=[2, 5, 5, 3],
-            append_batch_size=False,
             dtype="float32",
         )
 
@@ -491,10 +488,9 @@ class TestAvgPool2DDoubleGradCheckCase3(unittest.TestCase):
 
     @prog_scope()
     def func(self, place):
-        input_NCHW = fluid.layers.data(
+        input_NCHW = paddle.static.data(
             name="input_NCHW",
             shape=[2, 3, 5, 5],
-            append_batch_size=False,
             dtype="float32",
         )
 
@@ -525,10 +521,9 @@ class TestAvgPool2DDoubleGradCheckCase4(unittest.TestCase):
 
     @prog_scope()
     def func(self, place):
-        input_NCHW = fluid.layers.data(
+        input_NCHW = paddle.static.data(
             name="input_NCHW",
             shape=[2, 3, 5, 5],
-            append_batch_size=False,
             dtype="float32",
         )
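Note: the gradient-check tests above passed append_batch_size positionally as the third argument, so the migration simply drops it and dtype becomes the third positional argument of paddle.static.data. A sketch with an illustrative shape:

    import paddle

    paddle.enable_static()
    shape = [3, 4, 5]
    # Old: x = layers.data('x', shape, False, dtype)
    #      (name, shape, append_batch_size, dtype)
    # New: (name, shape, dtype)
    x = paddle.static.data('x', shape, 'float64')
    x.persistable = True  # as in the double-grad checks above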
diff --git a/python/paddle/fluid/tests/unittests/test_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
index dce29b96e5..aac65fba54 100644
--- a/python/paddle/fluid/tests/unittests/test_nonzero_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
@@ -25,7 +25,8 @@ class TestNonZeroAPI(unittest.TestCase):
     def test_nonzero_api_as_tuple(self):
         data = np.array([[True, False], [False, True]])
         with program_guard(Program(), Program()):
-            x = fluid.layers.data(name='x', shape=[-1, 2])
+            x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
+            x.desc.set_need_check_feed(False)
             y = paddle.nonzero(x, as_tuple=True)
             self.assertEqual(type(y), tuple)
             self.assertEqual(len(y), 2)
@@ -40,7 +41,8 @@ class TestNonZeroAPI(unittest.TestCase):
 
         data = np.array([True, True, False])
         with program_guard(Program(), Program()):
-            x = fluid.layers.data(name='x', shape=[-1])
+            x = paddle.static.data(name='x', shape=[-1], dtype='float32')
+            x.desc.set_need_check_feed(False)
             y = paddle.nonzero(x, as_tuple=True)
             self.assertEqual(type(y), tuple)
             self.assertEqual(len(y), 1)
@@ -55,7 +57,8 @@ class TestNonZeroAPI(unittest.TestCase):
     def test_nonzero_api(self):
         data = np.array([[True, False], [False, True]])
         with program_guard(Program(), Program()):
-            x = fluid.layers.data(name='x', shape=[-1, 2])
+            x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
+            x.desc.set_need_check_feed(False)
             y = paddle.nonzero(x)
             exe = fluid.Executor(fluid.CPUPlace())
             (res,) = exe.run(
@@ -66,7 +69,8 @@ class TestNonZeroAPI(unittest.TestCase):
 
         data = np.array([True, True, False])
         with program_guard(Program(), Program()):
-            x = fluid.layers.data(name='x', shape=[-1])
+            x = paddle.static.data(name='x', shape=[-1], dtype='float32')
+            x.desc.set_need_check_feed(False)
             y = paddle.nonzero(x)
             exe = fluid.Executor(fluid.CPUPlace())
             (res,) = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
index 60cc8c7665..8ef60fce49 100644
--- a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
+++ b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py
@@ -32,11 +32,10 @@ class TestNormalization(unittest.TestCase):
 
     def set_program(self, axis, epsilon):
         """Build the test program."""
-        data = fluid.layers.data(
+        data = paddle.static.data(
             name=self.data_desc["name"],
             shape=self.data_desc["shape"],
             dtype="float32",
-            append_batch_size=False,
         )
         data.stop_gradient = False
         l2_norm = paddle.nn.functional.normalize(
diff --git a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
index efbf8a98a8..841d7acc2c 100644
--- a/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_npair_loss_op.py
@@ -83,23 +83,20 @@ class TestNpairLossOp(unittest.TestCase):
             l2_reg=reg_lambda,
         )
 
-        anc = fluid.layers.data(
+        anc = paddle.static.data(
             dtype='float32',
             name='anc',
             shape=embeddings_anchor.shape,
-            append_batch_size=False,
         )
-        pos = fluid.layers.data(
+        pos = paddle.static.data(
             dtype='float32',
             name='pos',
             shape=embeddings_positive.shape,
-            append_batch_size=False,
         )
-        lab = fluid.layers.data(
+        lab = paddle.static.data(
             dtype='float32',
             name='lab',
             shape=row_labels.shape,
-            append_batch_size=False,
         )
 
         npair_loss_op = paddle.nn.functional.npair_loss(
diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
index 53e34ae70e..30bb75e0fa 100644
--- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -133,9 +133,13 @@ class TestOneHotOp_exception(unittest.TestCase):
     def test_check_output(self):
         program = Program()
         with program_guard(program):
-            x = fluid.layers.data(
-                name='x', shape=[self.dimension], dtype='float32', lod_level=1
+            x = paddle.static.data(
+                name='x',
+                shape=[-1, self.dimension],
+                dtype='float32',
+                lod_level=1,
             )
+            x.desc.set_need_check_feed(False)
             block = program.current_block()
             one_hot_out = block.create_var(
                 name="one_hot_out",
@@ -187,7 +191,8 @@ class TestOneHotOpApi(unittest.TestCase):
         )
 
     def _run(self, depth):
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
+        label.desc.set_need_check_feed(False)
         one_hot_label = fluid.one_hot(input=label, depth=depth)
 
         place = fluid.CPUPlace()
@@ -211,12 +216,12 @@ class BadInputTestOnehotV2(unittest.TestCase):
         with fluid.program_guard(fluid.Program()):
 
             def test_bad_x():
-                label = fluid.layers.data(
+                label = paddle.static.data(
                     name="label",
-                    shape=[4],
-                    append_batch_size=False,
+                    shape=[-1, 4],
                     dtype="float32",
                 )
+                label.desc.set_need_check_feed(False)
                 one_hot_label = fluid.one_hot(input=label, depth=4)
 
             self.assertRaises(TypeError, test_bad_x)
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py
index d5f54d4482..20a4a7705e 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -1181,10 +1181,12 @@ class TestRecomputeOptimizer(unittest.TestCase):
         scope = fluid.Scope()
         with fluid.scope_guard(scope):
             with program_guard(main_program, startup_program):
-                input_x = fluid.layers.data(
-                    name="x", shape=[3], dtype='float32'
+                input_x = paddle.static.data(
+                    name="x", shape=[-1, 3], dtype='float32'
+                )
+                input_y = paddle.static.data(
+                    name="y", shape=[-1, 1], dtype='int64'
                 )
-                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                 drop_res, prediction, cost = mlp(input_x, input_y)
                 sgd = fluid.optimizer.Adam(learning_rate=0.01)
                 sgd = fluid.optimizer.RecomputeOptimizer(sgd)
@@ -1243,10 +1245,12 @@ class TestRecomputeOptimizerCUDA(unittest.TestCase):
         scope = fluid.Scope()
         with fluid.scope_guard(scope):
             with program_guard(main_program, startup_program):
-                input_x = fluid.layers.data(
-                    name="x", shape=[3], dtype='float32'
+                input_x = paddle.static.data(
+                    name="x", shape=[-1, 3], dtype='float32'
+                )
+                input_y = paddle.static.data(
+                    name="y", shape=[-1, 1], dtype='int64'
                 )
-                input_y = fluid.layers.data(name="y", shape=[1], dtype='int64')
                 drop_res, prediction, cost = mlp(input_x, input_y)
                 sgd = fluid.optimizer.Adam(learning_rate=0.01)
                 sgd = fluid.optimizer.RecomputeOptimizer(sgd)
diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
index 731693f5cf..3b32c9ca4e 100644
--- a/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
+++ b/python/paddle/fluid/tests/unittests/test_optimizer_in_control_flow.py
@@ -264,7 +264,7 @@ class TestMultiOptimizersMultiCardsError(unittest.TestCase):
         def fn_2(opt, avg_loss):
             opt.minimize(avg_loss)
 
-        x = fluid.layers.data("X", [10], 'float32')
+        x = paddle.static.data("X", [-1, 10], 'float32')
         hidden = paddle.static.nn.fc(x, 5)
         avg_loss = paddle.mean(hidden)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
index 8654f0ba5e..0984b02d75 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
@@ -31,7 +31,7 @@ class TestParallelExecutorDropExeScope(unittest.TestCase):
         train_program = fluid.Program()
         startup_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+            data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32')
             hidden = paddle.static.nn.fc(x=data, size=10)
             loss = paddle.mean(hidden)
             test_program = fluid.default_main_program().clone(for_test=True)
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py
index 93fecfefb5..0d750ddcbe 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py
@@ -76,8 +76,8 @@ class TestMNISTDryRun(TestBase):
     @staticmethod
     def network_func():
-        img = fluid.layers.data(name='img', shape=[784], dtype='float32')
-        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        img = paddle.static.data(name='img', shape=[-1, 784], dtype='float32')
+        label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
         hidden = img
         for _ in range(10):
             hidden = paddle.static.nn.fc(x=img, size=200, activation='tanh')
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
index c92d323459..5aa87bc7b0 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
@@ -55,10 +55,12 @@ class TestFetchAndFeed(unittest.TestCase):
         startup = fluid.Program()
         startup.random_seed = seed
         with fluid.program_guard(main_program, startup):
-            data = fluid.layers.data(
-                name='image', shape=[3, 224, 224], dtype='float32'
+            data = paddle.static.data(
+                name='image', shape=[-1, 3, 224, 224], dtype='float32'
+            )
+            label = paddle.static.data(
+                name='label', shape=[-1, 1], dtype='int64'
             )
-            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
             out = Lenet(data, class_dim=102)
             loss = paddle.nn.functional.cross_entropy(
                 input=out, label=label, reduction='none', use_softmax=False
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index d81586fe25..850ddc379c 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -24,8 +24,8 @@ import paddle.fluid.core as core
 
 
 def simple_fc_net(use_feed):
-    img = fluid.layers.data(name='image', shape=[784], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32')
+    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
     hidden = img
     for _ in range(4):
         hidden = paddle.static.nn.fc(
@@ -45,8 +45,8 @@ def simple_fc_net(use_feed):
 
 
 def fc_with_batchnorm(use_feed):
-    img = fluid.layers.data(name='image', shape=[784], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32')
+    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
 
     hidden = img
     for _ in range(1):
diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
index 75e8e93987..19be1c7526 100755
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -19,7 +19,6 @@ from simple_nets import init_data, simple_fc_net
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 from paddle.fluid import core
 from paddle.fluid.framework import switch_main_program
 from paddle.static import Program, program_guard
@@ -36,7 +35,7 @@ class TestPrintOpCPU(unittest.TestCase):
         self.x_tensor.set_recursive_sequence_lengths([[1, 1]])
 
     def build_network(self, only_forward, **kargs):
-        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
+        x = paddle.static.data('x', shape=[-1, 3], dtype='float32', lod_level=1)
         x.stop_gradient = False
         paddle.static.Print(input=x, **kargs)
         loss = paddle.mean(x)
@@ -60,7 +59,7 @@ class TestPrintOpCPU(unittest.TestCase):
         )
 
     def test_all_parameters(self):
-        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
+        x = paddle.static.data('x', shape=[-1, 3], dtype='float32', lod_level=1)
         x.stop_gradient = False
 
         for print_tensor_name in [True, False]:
diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py
index b20538cad5..e22deb5df8 100644
--- a/python/paddle/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/fluid/tests/unittests/test_profiler.py
@@ -37,7 +37,9 @@ class TestProfiler(unittest.TestCase):
         startup_program = fluid.Program()
         main_program = fluid.Program()
         with fluid.program_guard(main_program, startup_program):
-            image = fluid.layers.data(name='x', shape=[784], dtype='float32')
+            image = paddle.static.data(
+                name='x', shape=[-1, 784], dtype='float32'
+            )
             hidden1 = paddle.static.nn.fc(x=image, size=64, activation='relu')
             i = layers.zeros(shape=[1], dtype='int64')
             counter = fluid.layers.zeros(
@@ -62,7 +64,7 @@ class TestProfiler(unittest.TestCase):
             predict = paddle.static.nn.fc(
                 x=hidden2, size=10, activation='softmax'
             )
-            label = fluid.layers.data(name='y', shape=[1], dtype='int64')
+            label = paddle.static.data(name='y', shape=[-1, 1], dtype='int64')
             cost = paddle.nn.functional.cross_entropy(
                 input=predict, label=label, reduction='none', use_softmax=False
             )
diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py
index 56b8b35234..dfc9b7572d 100644
--- a/python/paddle/fluid/tests/unittests/test_program.py
+++ b/python/paddle/fluid/tests/unittests/test_program.py
@@ -16,7 +16,6 @@ import unittest
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
 from paddle.fluid.framework import Program, default_main_program, program_guard
 
 paddle.enable_static()
@@ -98,7 +97,7 @@ class TestProgram(unittest.TestCase):
         main_program = Program()
         startup_program = Program()
         with program_guard(main_program, startup_program):
-            d = layers.data(name='x', shape=[784], dtype='float32')
+            d = paddle.static.data(name='x', shape=[-1, 784], dtype='float32')
             hidden = paddle.static.nn.fc(x=d, size=100)
             paddle.static.nn.fc(x=hidden, size=100)
diff --git a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
index 337feb1617..5364dcaa6e 100755
--- a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
+++ b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py
@@ -30,8 +30,8 @@ import paddle.fluid.core as core
 
 
 def simple_fc_net_with_accuracy(use_feed):
-    img = fluid.layers.data(name='image', shape=[784], dtype='float32')
-    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+    img = paddle.static.data(name='image', shape=[-1, 784], dtype='float32')
+    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
 
     hidden = img
     for _ in range(4):
@@ -53,12 +53,12 @@ def simple_fc_net_with_accuracy(use_feed):
 
 
 def cond_net(use_feed=None):
-    x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-    label = fluid.layers.data('label', shape=[1], dtype='int64')
+    x = paddle.static.data(name="x", shape=[-1, 4], dtype='float32')
+    label = paddle.static.data('label', shape=[-1, 1], dtype='int64')
     prediction = paddle.static.nn.fc(x, size=1, activation=None)
 
     def loss1(pred, label):
-        x = fluid.layers.data(name="x", shape=[4], dtype='float32')
+        x = paddle.static.data(name="x", shape=[-1, 4], dtype='float32')
         loss = paddle.nn.functional.cross_entropy(
             input=pred, label=label, reduction='none', use_softmax=False
         )
@@ -82,12 +82,12 @@ def cond_net(use_feed=None):
 
 
 def optimization_in_cond_net(with_optimize=False):
-    x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-    label = fluid.layers.data('label', shape=[1], dtype='int64')
+    x = paddle.static.data(name="x", shape=[-1, 4], dtype='float32')
+    label = paddle.static.data('label', shape=[-1, 1], dtype='int64')
     prediction = paddle.static.nn.fc(x, size=1, activation=None)
 
     def loss1(opt, pred, label, with_optimize):
-        x = fluid.layers.data(name="x", shape=[4], dtype='float32')
+        x = paddle.static.data(name="x", shape=[-1, 4], dtype='float32')
         loss = paddle.nn.functional.cross_entropy(
             input=pred, label=label, reduction='none', use_softmax=False
         )
diff --git a/python/paddle/fluid/tests/unittests/test_program_to_string.py b/python/paddle/fluid/tests/unittests/test_program_to_string.py
index 55f32b687d..04a816a541 100644
--- a/python/paddle/fluid/tests/unittests/test_program_to_string.py
+++ b/python/paddle/fluid/tests/unittests/test_program_to_string.py
@@ -21,9 +21,7 @@ import paddle.fluid as fluid
 class TestProgram(unittest.TestCase):
     def test_program_to_string(self):
         prog = fluid.default_main_program()
-        a = fluid.layers.data(
-            name="X", shape=[2, 3], dtype="float32", append_batch_size=False
-        )
+        a = paddle.static.data(name="X", shape=[2, 3], dtype="float32")
        c = paddle.static.nn.fc(a, size=3)
         prog_string = prog.to_string(throw_on_error=True, with_details=False)
         prog_string_with_details = prog.to_string(
diff --git a/python/paddle/fluid/tests/unittests/test_prune.py b/python/paddle/fluid/tests/unittests/test_prune.py
index 2c97f49aee..30e3aefe0a 100644
--- a/python/paddle/fluid/tests/unittests/test_prune.py
+++ b/python/paddle/fluid/tests/unittests/test_prune.py
@@ -25,8 +25,10 @@ import paddle.fluid.framework as framework
 
 class TestPrune(unittest.TestCase):
     def net(self):
-        x = fluid.layers.data(name='x', shape=[2], dtype='float32')
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
+        x.desc.set_need_check_feed(False)
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
+        label.desc.set_need_check_feed(False)
         y = paddle.static.nn.fc(x=[x], size=2, activation="softmax")
         loss = paddle.nn.functional.cross_entropy(
             input=y, label=label, reduction='none', use_softmax=False
@@ -161,8 +163,10 @@ def _mock_guard(mock):
 
 class TestExecutorRunAutoPrune(unittest.TestCase):
     def net1(self):
-        x = fluid.layers.data(name='x', shape=[2], dtype='float32')
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
+        x.desc.set_need_check_feed(False)
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
+        label.desc.set_need_check_feed(False)
         w_param_attrs = fluid.ParamAttr(
             name="fc_weight",
             learning_rate=0.5,
@@ -185,9 +189,12 @@ class TestExecutorRunAutoPrune(unittest.TestCase):
         return x, y, label, loss1, loss2, w_param_attrs
 
     def net2(self):
-        x1 = fluid.layers.data(name='x1', shape=[2], dtype='float32')
-        x2 = fluid.layers.data(name='x2', shape=[2], dtype='float32')
-        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+        x1 = paddle.static.data(name='x1', shape=[-1, 2],
dtype='float32') + x1.desc.set_need_check_feed(False) + x2 = paddle.static.data(name='x2', shape=[-1, 2], dtype='float32') + x2.desc.set_need_check_feed(False) + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") + label.desc.set_need_check_feed(False) w1_param_attrs = fluid.ParamAttr( name="fc_weight1", learning_rate=0.5, diff --git a/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py b/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py index b537a31b3a..1e50e1a08c 100644 --- a/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py +++ b/python/paddle/fluid/tests/unittests/test_pull_gpups_sparse_op.py @@ -32,8 +32,8 @@ class TestPullGpupsSparse(unittest.TestCase): slots = [] with fluid.program_guard(train_program, startup_program): - l = fluid.layers.data( - name='input', shape=[1], dtype="int64", lod_level=1 + l = paddle.static.data( + name='input', shape=[-1, 1], dtype="int64", lod_level=1 ) slots.append(l) output = _pull_gpups_sparse( diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 69e363ee71..a90e37a475 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -179,8 +179,12 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): with fluid.scope_guard(fluid.core.Scope()): gen = paddle.seed(1) np.random.seed(1) - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + img = paddle.static.data( + name='image', shape=[-1, 784], dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' + ) loss = simple_fc_net(img, label, use_py_func_op) optimizer = fluid.optimizer.SGD(learning_rate=1e-3) optimizer.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py index e238eeaf4c..db664c55eb 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_combination.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_combination.py @@ -56,10 +56,12 @@ class TestPyReaderCombination(unittest.TestCase): def main_impl(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): - image = fluid.layers.data( - name='image', dtype='float32', shape=[784] + image = paddle.static.data( + name='image', dtype='float32', shape=[-1, 784] + ) + label = paddle.static.data( + name='label', dtype='int64', shape=[-1, 1] ) - label = fluid.layers.data(name='label', dtype='int64', shape=[1]) py_reader1 = fluid.io.PyReader( feed_list=[image, label], capacity=16, iterable=True diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py b/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py index d18a66b828..ddef600b16 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_return_list.py @@ -38,8 +38,8 @@ class TestPyReader(unittest.TestCase): for return_list in [True, False]: with fluid.program_guard(fluid.Program(), fluid.Program()): - image = fluid.layers.data( - name='image', shape=[784, 784], dtype='float32' + image = paddle.static.data( + name='image', shape=[-1, 784, 784], dtype='float32' ) reader = fluid.io.PyReader( feed_list=[image], diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py 
b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py index 19cd4f546a..afc81e0f2a 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py @@ -54,8 +54,10 @@ class TestCaseBase(unittest.TestCase): return ret def run_main(self, reader, use_sample_generator, iterable, drop_last): - image = fluid.layers.data(name='image', dtype='float32', shape=[784]) - label = fluid.layers.data(name='label', dtype='int64', shape=[1]) + image = paddle.static.data( + name='image', dtype='float32', shape=[-1, 784] + ) + label = paddle.static.data(name='label', dtype='int64', shape=[-1, 1]) py_reader = fluid.io.PyReader( feed_list=[image, label], capacity=16, diff --git a/python/paddle/fluid/tests/unittests/test_reader_reset.py b/python/paddle/fluid/tests/unittests/test_reader_reset.py index bbdade712e..9524068316 100644 --- a/python/paddle/fluid/tests/unittests/test_reader_reset.py +++ b/python/paddle/fluid/tests/unittests/test_reader_reset.py @@ -46,10 +46,12 @@ class TestReaderReset(unittest.TestCase): startup_prog = fluid.Program() with fluid.program_guard(main_prog, startup_prog): - image = fluid.layers.data( - name='image', shape=self.ins_shape, dtype='float32' + image = paddle.static.data( + name='image', shape=[-1] + self.ins_shape, dtype='float32' + ) + label = paddle.static.data( + name='label', shape=[-1, 1], dtype='int64' ) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') data_reader_handle = fluid.io.PyReader( feed_list=[image, label], capacity=16, diff --git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index db1abc9ef0..4ea5ed0e0d 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -135,15 +135,14 @@ class RecurrentOpTest1(unittest.TestCase): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype='float32', name='h_boot' + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype='float32', name='h_boot' ) h_boot.stop_gradient = False @@ -281,15 +280,14 @@ class RecurrentOpTest2(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype='float32', name='h_boot' + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype='float32', name='h_boot' ) h_boot.stop_gradient = False @@ -390,25 +388,22 @@ class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - h_boot1 = layers.data( + h_boot1 = paddle.static.data( shape=[self.batch_size, self.input_dim], dtype='float32', name='h_boot1', - append_batch_size=False, ) h_boot1.stop_gradient = False - h_boot2 = layers.data( + h_boot2 = paddle.static.data( shape=[self.batch_size, 
self.input_dim], dtype='float32', name='h_boot2', - append_batch_size=False, ) h_boot2.stop_gradient = False @@ -477,11 +472,10 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False @@ -582,34 +576,30 @@ class RecurrentOpSubBlockTest(RecurrentOpTest1): self.output = paddle.mean(rnn_out) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', - append_batch_size=False, ) x.stop_gradient = False - emb = layers.data( + emb = paddle.static.data( name='emb', shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', - append_batch_size=False, ) emb.stop_gradient = False - w1 = layers.data( + w1 = paddle.static.data( shape=[self.input_dim, self.input_dim], dtype='float32', name='w1', - append_batch_size=False, ) w1.stop_gradient = False - w2 = layers.data( + w2 = paddle.static.data( shape=[self.input_dim * 2, self.input_dim], dtype='float32', name='w2', - append_batch_size=False, ) w2.stop_gradient = False @@ -675,15 +665,14 @@ class RecurrentOpStopGradientTest(RecurrentOpTest1): self.output = paddle.mean(self.create_rnn_op()) def create_rnn_op(self): - x = layers.data( + x = paddle.static.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype="float32", name="x", - append_batch_size=False, ) x.stop_gradient = False - h_boot = layers.data( - shape=[self.input_dim], dtype="float32", name="h_boot" + h_boot = paddle.static.data( + shape=[-1, self.input_dim], dtype="float32", name="h_boot" ) h_boot.stop_gradient = True diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index b101c24e77..026554f010 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -507,8 +507,8 @@ class TestAllOpError(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.all, input1) # The input dtype of reduce_all_op must be bool. - input2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + input2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) self.assertRaises(TypeError, paddle.all, input2) @@ -624,8 +624,8 @@ class TestAnyOpError(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.any, input1) # The input dtype of reduce_any_op must be bool. - input2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + input2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) self.assertRaises(TypeError, paddle.any, input2) @@ -918,7 +918,7 @@ class TestReduceSumOpError(unittest.TestCase): ) self.assertRaises(TypeError, paddle.sum, x1) # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. 
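# A note on the rewrite pattern running through these hunks: the retired
# fluid.layers.data prepended a batch dimension automatically (its
# append_batch_size argument defaulted to True), so shape=[3] really declared
# a [-1, 3] variable. paddle.static.data takes the shape verbatim, hence the
# explicit leading -1 in every replacement. A minimal sketch of the
# equivalence (the variable name is illustrative only):
import paddle
paddle.enable_static()
# old, removed form: fluid.layers.data(name='x', shape=[3], dtype='float32')
x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32')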
- x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.sum, x2) diff --git a/python/paddle/fluid/tests/unittests/test_registry.py b/python/paddle/fluid/tests/unittests/test_registry.py index 1427d0e075..4d40eb9965 100644 --- a/python/paddle/fluid/tests/unittests/test_registry.py +++ b/python/paddle/fluid/tests/unittests/test_registry.py @@ -24,7 +24,7 @@ import paddle.fluid as fluid class TestRegistry(unittest.TestCase): @prog_scope() def test_registry_layer(self): - x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32') + x = paddle.static.data(name='X', shape=[-1, 10, 10], dtype='float32') output = paddle.mean(x) place = fluid.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 5ab643819d..13420553f1 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -198,10 +198,12 @@ class TestRegularizer(unittest.TestCase): with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_len) @@ -221,10 +223,12 @@ class TestRegularizer(unittest.TestCase): with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost_l2 = model(data, label, self.word_len) diff --git a/python/paddle/fluid/tests/unittests/test_regularizer_api.py b/python/paddle/fluid/tests/unittests/test_regularizer_api.py index a863ed45fd..0d3e99a6c7 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer_api.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer_api.py @@ -105,10 +105,12 @@ class TestRegularizer(unittest.TestCase): with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, self.word_len) @@ -129,10 +131,12 @@ class TestRegularizer(unittest.TestCase): with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost_l2 = model(data, label, self.word_len) diff --git a/python/paddle/fluid/tests/unittests/test_renorm_op.py b/python/paddle/fluid/tests/unittests/test_renorm_op.py index 7a4cac34ec..79dc144ee5 100644 --- 
a/python/paddle/fluid/tests/unittests/test_renorm_op.py +++ b/python/paddle/fluid/tests/unittests/test_renorm_op.py @@ -38,7 +38,6 @@ class TestRenormAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): - # x = fluid.layers.data(name = 'x',shape=[-1, 2, 3]) x = paddle.static.data(name="x", shape=[-1, 2, 3], dtype='float64') z = paddle.renorm(x, self.p, self.dim, self.max_norm) exe = fluid.Executor(fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py index 6b602fa741..90877a3047 100644 --- a/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py +++ b/python/paddle/fluid/tests/unittests/test_repeat_interleave_op.py @@ -120,13 +120,14 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data( + x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') + x.desc.set_need_check_feed(False) + index = paddle.static.data( name='repeats_', shape=[4], dtype='int32', - append_batch_size=False, ) + index.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, index, axis=1) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -140,13 +141,14 @@ class TestIndexSelectAPI(unittest.TestCase): # case 2: repeats = np.array([1, 2, 1]).astype('int32') with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data( + x = paddle.static.data(name='x', shape=[-1, 4], dtype="float32") + x.desc.set_need_check_feed(False) + index = paddle.static.data( name='repeats_', shape=[3], dtype='int32', - append_batch_size=False, ) + index.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, index, axis=0) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -162,7 +164,8 @@ class TestIndexSelectAPI(unittest.TestCase): repeats = 2 with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) + x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') + x.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, repeats, axis=0) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -173,7 +176,8 @@ class TestIndexSelectAPI(unittest.TestCase): # case 3 zero_dim: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[]) + x = paddle.static.data(name='x', shape=[-1], dtype="float32") + x.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, repeats) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py index 01a58266ae..63b22fb9fc 100644 --- a/python/paddle/fluid/tests/unittests/test_rmsprop_op.py +++ b/python/paddle/fluid/tests/unittests/test_rmsprop_op.py @@ -277,8 +277,8 @@ class TestRMSPropV2(unittest.TestCase): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py 
b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py index 9c9c5520c0..730daa1cbe 100644 --- a/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py +++ b/python/paddle/fluid/tests/unittests/test_rnn_cell_api.py @@ -22,7 +22,6 @@ from rnn.rnn_numpy import rnn as numpy_rnn import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers import paddle.fluid.layers.utils as utils from paddle.fluid import framework from paddle.fluid.executor import Executor @@ -42,10 +41,9 @@ class TestRnnError(unittest.TestCase): inputs = fluid.data( name='inputs', shape=[None, input_size], dtype='float32' ) - pre_hidden = layers.data( + pre_hidden = paddle.static.data( name='pre_hidden', shape=[None, hidden_size], - append_batch_size=False, dtype='float32', ) inputs_basic_lstm = fluid.data( diff --git a/python/paddle/fluid/tests/unittests/test_roll_op.py b/python/paddle/fluid/tests/unittests/test_roll_op.py index 1bb4e33926..9156f72894 100644 --- a/python/paddle/fluid/tests/unittests/test_roll_op.py +++ b/python/paddle/fluid/tests/unittests/test_roll_op.py @@ -68,7 +68,8 @@ class TestRollAPI(unittest.TestCase): paddle.enable_static() # case 1: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 3]) + x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32') + x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -81,7 +82,8 @@ class TestRollAPI(unittest.TestCase): # case 2: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 3]) + x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32') + x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1, axis=0) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( @@ -119,7 +121,8 @@ class TestRollAPI(unittest.TestCase): def test_axis_out_range(): with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 3]) + x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32') + x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1, axis=10) exe = fluid.Executor(fluid.CPUPlace()) (res,) = exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py index 35686f843d..fe012ded39 100644 --- a/python/paddle/fluid/tests/unittests/test_run_program_op.py +++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py @@ -460,8 +460,8 @@ class TestRunProgramOpWithEmbedding(RunProgramOpTest): def build_model(self): # 1. 
simple model - x = fluid.layers.data( - name=self.input_names['X'][0], shape=[5], dtype='int64' + x = paddle.static.data( + name=self.input_names['X'][0], shape=[-1, 5], dtype='int64' ) emb = fluid.input.embedding( input=x, diff --git a/python/paddle/fluid/tests/unittests/test_save_model_without_var.py b/python/paddle/fluid/tests/unittests/test_save_model_without_var.py index 1394006807..97961b3df0 100644 --- a/python/paddle/fluid/tests/unittests/test_save_model_without_var.py +++ b/python/paddle/fluid/tests/unittests/test_save_model_without_var.py @@ -15,14 +15,13 @@ import unittest import warnings +import paddle import paddle.fluid as fluid class TestSaveModelWithoutVar(unittest.TestCase): def test_no_var_save(self): - data = fluid.layers.data( - name='data', shape=[-1, 1], dtype='float32', append_batch_size=False - ) + data = paddle.static.data(name='data', shape=[-1, 1], dtype='float32') data_plus = data + 1 if fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py index 3fc7aedf02..f66ff39f10 100644 --- a/python/paddle/fluid/tests/unittests/test_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_scale_op.py @@ -22,7 +22,6 @@ from op_test import OpTest, convert_float_to_uint16 import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid.op import Operator from paddle.static import Program, program_guard @@ -247,7 +246,7 @@ class TestScaleDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.scale(data, 2.0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -278,7 +277,7 @@ class TestScaleTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.scale(data, 2.0) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py index b0c58839b3..5c5009984e 100644 --- a/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_scatter_nd_op.py @@ -167,44 +167,38 @@ class TestScatterNdOpAPI(unittest.TestCase): """ def testcase1(self): - ref1 = fluid.layers.data( + ref1 = paddle.static.data( name='ref1', shape=[10, 9, 8, 1, 3], dtype='float32', - append_batch_size=False, ) - index1 = fluid.layers.data( + index1 = paddle.static.data( name='index1', shape=[5, 5, 8, 5], dtype='int32', - append_batch_size=False, ) - updates1 = fluid.layers.data( + updates1 = paddle.static.data( name='update1', shape=[5, 5, 8], dtype='float32', - append_batch_size=False, ) output1 = paddle.scatter_nd_add(ref1, index1, updates1) def testcase2(self): - ref2 = fluid.layers.data( + ref2 = paddle.static.data( name='ref2', shape=[10, 9, 8, 1, 3], dtype='double', - append_batch_size=False, ) - index2 = fluid.layers.data( + index2 = paddle.static.data( name='index2', shape=[5, 8, 5], dtype='int32', - append_batch_size=False, ) - updates2 = fluid.layers.data( + updates2 = paddle.static.data( name='update2', shape=[5, 8], dtype='double', - append_batch_size=False, ) output2 = paddle.scatter_nd_add( ref2, index2, updates2, name="scatter_nd_add" @@ -212,33 +206,29 @@ 
class TestScatterNdOpAPI(unittest.TestCase): def testcase3(self): shape3 = [10, 9, 8, 1, 3] - index3 = fluid.layers.data( + index3 = paddle.static.data( name='index3', shape=[5, 5, 8, 5], dtype='int32', - append_batch_size=False, ) - updates3 = fluid.layers.data( + updates3 = paddle.static.data( name='update3', shape=[5, 5, 8], dtype='float32', - append_batch_size=False, ) output3 = paddle.scatter_nd(index3, updates3, shape3) def testcase4(self): shape4 = [10, 9, 8, 1, 3] - index4 = fluid.layers.data( + index4 = paddle.static.data( name='index4', shape=[5, 5, 8, 5], dtype='int32', - append_batch_size=False, ) - updates4 = fluid.layers.data( + updates4 = paddle.static.data( name='update4', shape=[5, 5, 8], dtype='double', - append_batch_size=False, ) output4 = paddle.scatter_nd(index4, updates4, shape4, name='scatter_nd') @@ -298,14 +288,14 @@ class TestScatterNdOpRaise(unittest.TestCase): def test_check_raise(self): def check_raise_is_test(): try: - ref5 = fluid.layers.data( - name='ref5', shape=[3, 4, 5], dtype='float32' + ref5 = paddle.static.data( + name='ref5', shape=[-1, 3, 4, 5], dtype='float32' ) - index5 = fluid.layers.data( - name='index5', shape=[2, 10], dtype='int32' + index5 = paddle.static.data( + name='index5', shape=[-1, 2, 10], dtype='int32' ) - updates5 = fluid.layers.data( - name='updates5', shape=[2, 10], dtype='float32' + updates5 = paddle.static.data( + name='updates5', shape=[-1, 2, 10], dtype='float32' ) output5 = paddle.scatter_nd_add(ref5, index5, updates5) except Exception as e: @@ -317,23 +307,20 @@ class TestScatterNdOpRaise(unittest.TestCase): def test_check_raise2(self): with self.assertRaises(ValueError): - ref6 = fluid.layers.data( + ref6 = paddle.static.data( name='ref6', shape=[10, 9, 8, 1, 3], dtype='double', - append_batch_size=False, ) - index6 = fluid.layers.data( + index6 = paddle.static.data( name='index6', shape=[5, 8, 5], dtype='int32', - append_batch_size=False, ) - updates6 = fluid.layers.data( + updates6 = paddle.static.data( name='update6', shape=[5, 8], dtype='float32', - append_batch_size=False, ) output6 = paddle.scatter_nd_add(ref6, index6, updates6) @@ -341,11 +328,11 @@ class TestScatterNdOpRaise(unittest.TestCase): def check_raise_is_test(): try: shape = [3, 4, 5] - index7 = fluid.layers.data( - name='index7', shape=[2, 1], dtype='int32' + index7 = paddle.static.data( + name='index7', shape=[-1, 2, 1], dtype='int32' ) - updates7 = fluid.layers.data( - name='updates7', shape=[2, 4, 5, 20], dtype='float32' + updates7 = paddle.static.data( + name='updates7', shape=[-1, 2, 4, 5, 20], dtype='float32' ) output7 = paddle.scatter_nd(index7, updates7, shape) except Exception as e: diff --git a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py index c294e5f6e5..d92688d108 100644 --- a/python/paddle/fluid/tests/unittests/test_select_input_output_op.py +++ b/python/paddle/fluid/tests/unittests/test_select_input_output_op.py @@ -19,7 +19,6 @@ import numpy as np import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid.backward import append_backward from paddle.fluid.executor import Executor from paddle.fluid.framework import Program, program_guard @@ -33,9 +32,11 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase): for branch_num in range(2, 10): program = Program() with program_guard(program): - x = layers.data(name='x', shape=[2], dtype='float32') + x = paddle.static.data(name='x', 
shape=[-1, 2], dtype='float32') x.stop_gradient = False # For test gradient - mask = layers.data(name='mask', shape=[1], dtype='int32') + mask = paddle.static.data( + name='mask', shape=[-1, 1], dtype='int32' + ) outputs = [] for i in range(branch_num): @@ -78,8 +79,8 @@ class TestSplitMergeSelectedVarOps(unittest.TestCase): class TestSelectInputOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - mask = layers.data(name='mask', shape=[1], dtype='int32') - in1 = layers.data(name='in1', shape=[1], dtype='int32') + mask = paddle.static.data(name='mask', shape=[-1, 1], dtype='int32') + in1 = paddle.static.data(name='in1', shape=[-1, 1], dtype='int32') # 1. The type of inputs in select_input must be list or tuple. def test_inputs_type(): @@ -95,7 +96,9 @@ class TestSelectInputOpError(unittest.TestCase): # 3. The dtype of mask in select_input must be int32 or int64. def test_mask_dtype(): - mask = layers.data(name='mask2', shape=[1], dtype='float32') + mask = paddle.static.data( + name='mask2', shape=[-1, 1], dtype='float32' + ) select_input([in1], mask) self.assertRaises(TypeError, test_mask_dtype) @@ -105,14 +108,14 @@ class TestSelectOutput_Error(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - in1 = layers.data(name='in1', shape=[1], dtype='int32') - mask_int32 = layers.data( - name='mask_int32', shape=[1], dtype='int32' + in1 = paddle.static.data(name='in1', shape=[-1, 1], dtype='int32') + mask_int32 = paddle.static.data( + name='mask_int32', shape=[-1, 1], dtype='int32' ) - mask_float32 = layers.data( - name='mask_float32', shape=[1], dtype='float32' + mask_float32 = paddle.static.data( + name='mask_float32', shape=[-1, 1], dtype='float32' ) - out1 = layers.data(name='out1', shape=[1], dtype='int32') + out1 = paddle.static.data(name='out1', shape=[-1, 1], dtype='int32') # 1. The type of input in select_output must Variable. 
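# A related detail: fluid.layers.data silently defaulted dtype to 'float32',
# so hunks whose old calls omitted the dtype (test_repeat_interleave and
# test_roll above) now have to state it explicitly. A sketch, assuming a
# float input named 'x':
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')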
def test_input_type(): diff --git a/python/paddle/fluid/tests/unittests/test_set_bool_attr.py b/python/paddle/fluid/tests/unittests/test_set_bool_attr.py index da02e4621d..c599f08ae2 100644 --- a/python/paddle/fluid/tests/unittests/test_set_bool_attr.py +++ b/python/paddle/fluid/tests/unittests/test_set_bool_attr.py @@ -21,7 +21,9 @@ import paddle.fluid as fluid class TestAttrSet(unittest.TestCase): def test_set_bool_attr(self): - x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + x = paddle.static.data( + name='x', shape=[-1, 3, 7, 3, 7], dtype='float32' + ) param_attr = fluid.ParamAttr( name='batch_norm_w', initializer=fluid.initializer.Constant(value=1.0), diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py b/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py index 81b8c1b2b1..89515c931c 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op_bf16.py @@ -334,9 +334,13 @@ class TestSGDOpBF16API(unittest.TestCase): place = fluid.CPUPlace() main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='X', shape=self.ids_shape, dtype='int64') - label = fluid.layers.data( - name='Y', shape=self.y_shape, dtype='uint16' + ids_shape = list(self.ids_shape) + x = paddle.static.data( + name='X', shape=[-1] + ids_shape, dtype='int64' + ) + y_shape = list(self.y_shape) + label = paddle.static.data( + name='Y', shape=[-1] + y_shape, dtype='uint16' ) emb = fluid.layers.embedding( input=x, diff --git a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py index ce18a46479..de6e6bd725 100644 --- a/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/fluid/tests/unittests/test_sigmoid_cross_entropy_with_logits_op.py @@ -286,11 +286,11 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest): def test_dtype(): # the input dtype of sigmoid_cross_entropy_with_logits must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data( - name='x2', shape=[3, 4, 5, 6], dtype="int32" + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) - lab2 = fluid.layers.data( - name='lab2', shape=[3, 4, 5, 6], dtype="int32" + lab2 = paddle.static.data( + name='lab2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) paddle.nn.functional.binary_cross_entropy_with_logits( x2, lab2 diff --git a/python/paddle/fluid/tests/unittests/test_sign_op.py b/python/paddle/fluid/tests/unittests/test_sign_op.py index ca6ca55c91..7834736260 100644 --- a/python/paddle/fluid/tests/unittests/test_sign_op.py +++ b/python/paddle/fluid/tests/unittests/test_sign_op.py @@ -22,7 +22,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard @@ -48,16 +47,16 @@ class TestSignOpError(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.sign, input1) # The input dtype of sign_op must be float16, float32, float64. 
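# Where the declared shape comes from a test fixture (the test_sgd_op_bf16
# hunk above), it may arrive as a tuple, so it is coerced to a list before
# the batch dimension is prepended. A sketch with a hypothetical fixture
# value standing in for self.ids_shape:
import paddle
paddle.enable_static()
ids_shape = (3, 1)  # stand-in for the fixture value
x = paddle.static.data(name='X', shape=[-1] + list(ids_shape), dtype='int64')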
- input2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + input2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) - input3 = fluid.layers.data( - name='input3', shape=[12, 10], dtype="int64" + input3 = paddle.static.data( + name='input3', shape=[-1, 12, 10], dtype="int64" ) self.assertRaises(TypeError, paddle.sign, input2) self.assertRaises(TypeError, paddle.sign, input3) - input4 = fluid.layers.data( - name='input4', shape=[4], dtype="float16" + input4 = paddle.static.data( + name='input4', shape=[-1, 4], dtype="float16" ) paddle.sign(input4) @@ -78,16 +77,16 @@ class TestSignAPI(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.tensor.math.sign, input1) # The input dtype of sign_op must be float16, float32, float64. - input2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + input2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) - input3 = fluid.layers.data( - name='input3', shape=[12, 10], dtype="int64" + input3 = paddle.static.data( + name='input3', shape=[-1, 12, 10], dtype="int64" ) self.assertRaises(TypeError, paddle.tensor.math.sign, input2) self.assertRaises(TypeError, paddle.tensor.math.sign, input3) - input4 = fluid.layers.data( - name='input4', shape=[4], dtype="float16" + input4 = paddle.static.data( + name='input4', shape=[-1, 4], dtype="float16" ) paddle.sign(input4) @@ -102,7 +101,7 @@ class TestSignDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [1, 4], False, dtype) + data = paddle.static.data('data', [1, 4], dtype) data.persistable = True out = paddle.sign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -133,7 +132,7 @@ class TestSignTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [1, 4], False, dtype) + data = paddle.static.data('data', [1, 4], dtype) data.persistable = True out = paddle.sign(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index 12838b218b..19aa669bad 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -22,7 +22,6 @@ from op_test import OpTest, convert_float_to_uint16 import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.tensor.manipulation import tensor_array_to_tensor paddle.enable_static() @@ -551,17 +550,15 @@ class TestSliceAPI(unittest.TestCase): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int64", -3) - starts = fluid.layers.data( - name='starts', shape=[1, 3], append_batch_size=False + starts = paddle.static.data( + name='starts', shape=[1, 3], dtype="float32" ) - ends = fluid.layers.data( - name='ends', shape=[3], append_batch_size=False - ) - - x = fluid.layers.data( + starts.desc.set_need_check_feed(False) + ends = paddle.static.data(name='ends', shape=[3], dtype="float32") + ends.desc.set_need_check_feed(False) + x = paddle.static.data( name="x", shape=[3, 4, 5, 6], - append_batch_size=False, dtype="float64", ) @@ -885,7 +882,7 @@ class TestSliceDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [4, 5, 6], False, dtype) + data = paddle.static.data('data', [4, 5, 6], dtype) 
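# The gradient-check hunks here and below follow the patch's second pattern:
# the old positional call layers.data('data', [1, 4], False, dtype) already
# passed append_batch_size=False, so the shape carries over with no -1
# prepended. A sketch (the dtype is chosen for illustration):
import paddle
paddle.enable_static()
data = paddle.static.data('data', [1, 4], 'float32')
data.persistable = True  # kept persistable for the gradient checker, as in the tests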
data.persistable = True out = paddle.slice( data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] @@ -920,7 +917,7 @@ class TestSliceTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [4, 5, 6], False, dtype) + data = paddle.static.data('data', [4, 5, 6], dtype) data.persistable = True out = paddle.slice( data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4] diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index 943861ab2f..40e7bff55e 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -304,34 +304,46 @@ class TestSplitOpError(unittest.TestCase): with program_guard(Program(), Program()): # The type of axis in split_op should be int or Variable. def test_axis_type(): - x6 = fluid.layers.data(shape=[4], dtype='float16', name='x3') + x6 = paddle.static.data( + shape=[-1, 4], dtype='float16', name='x3' + ) paddle.split(x=x6, num_or_sections=2, axis=3.2) self.assertRaises(TypeError, test_axis_type) # The type of axis in split_op should be int or Variable. def test_axis_variable_type(): - x9 = fluid.layers.data(shape=[4], dtype='float16', name='x9') - x10 = fluid.layers.data(shape=[1], dtype='float16', name='x10') + x9 = paddle.static.data( + shape=[-1, 4], dtype='float16', name='x9' + ) + x10 = paddle.static.data( + shape=[-1, 1], dtype='float16', name='x10' + ) paddle.split(x=x9, num_or_sections=2, axis=x10) self.assertRaises(TypeError, test_axis_variable_type) # The type of num_or_sections in split_op should be int, tuple or list. def test_num_or_sections_type(): - x6 = fluid.layers.data(shape=[4], dtype='float16', name='x4') + x6 = paddle.static.data( + shape=[-1, 4], dtype='float16', name='x4' + ) paddle.split(x=x6, num_or_sections=2.1, axis=3) self.assertRaises(TypeError, test_num_or_sections_type) def test_num_or_sections_type_tensor(): - x7 = fluid.layers.data(shape=[4], dtype='float16', name='x5') + x7 = paddle.static.data( + shape=[-1, 4], dtype='float16', name='x5' + ) paddle.split(input=x7, num_or_sections=2.1, dim=3) self.assertRaises(TypeError, test_num_or_sections_type_tensor) def test_axis_type_tensor(): - x8 = fluid.layers.data(shape=[4], dtype='float16', name='x6') + x8 = paddle.static.data( + shape=[-1, 4], dtype='float16', name='x6' + ) paddle.split(input=x8, num_or_sections=2, dim=3.2) self.assertRaises(TypeError, test_axis_type_tensor) @@ -340,8 +352,12 @@ class TestSplitOpError(unittest.TestCase): class API_TestSplit(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') - data2 = fluid.layers.data('data2', shape=[1], dtype='int32') + data1 = paddle.static.data( + 'data1', shape=[-1, 4, 6, 6], dtype='float64' + ) + data1.desc.set_need_check_feed(False) + data2 = paddle.static.data('data2', shape=[-1, 1], dtype='int32') + data2.desc.set_need_check_feed(False) x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=data2) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -359,7 +375,10 @@ class API_TestSplit(unittest.TestCase): class API_TestSplit2(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') + data1 = paddle.static.data( + 'data1', shape=[-1, 4, 6, 6], dtype='float64' + ) + data1.desc.set_need_check_feed(False) x0, x1, x2 = 
paddle.split(data1, num_or_sections=3, axis=2) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -378,7 +397,7 @@ class API_TestSplit2(unittest.TestCase): class API_TestSplit3(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') + data = paddle.static.data('data', shape=[-1, 10], dtype='float64') x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -392,8 +411,8 @@ class API_TestSplit3(unittest.TestCase): class API_TestSplit4(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') - index = fluid.layers.data('index', shape=[1], dtype='int32') + data = paddle.static.data('data', shape=[-1, 10], dtype='float64') + index = paddle.static.data('index', shape=[1], dtype='int32') x0, x1 = paddle.split(data, num_or_sections=(3, index), axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -430,7 +449,7 @@ class API_TestSplit5(unittest.TestCase): class API_TestSplit6(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') + data = paddle.static.data('data', shape=[-1, 10], dtype='float64') x0, x1 = paddle.split(data, num_or_sections=[1, 1], axis=0) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py index b60152a514..ae3b67a2f1 100755 --- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -22,7 +22,6 @@ from op_test import OpTest, convert_float_to_uint16 import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard paddle.enable_static() @@ -228,7 +227,7 @@ class TestSqueezeDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.squeeze(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -259,7 +258,7 @@ class TestSqueezeTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3], False, dtype) + data = paddle.static.data('data', [2, 3], dtype) data.persistable = True out = paddle.squeeze(data) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_stack_op.py b/python/paddle/fluid/tests/unittests/test_stack_op.py index 9e84268a5f..496877dc55 100644 --- a/python/paddle/fluid/tests/unittests/test_stack_op.py +++ b/python/paddle/fluid/tests/unittests/test_stack_op.py @@ -226,9 +226,9 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): class API_test(unittest.TestCase): def test_out(self): with fluid.program_guard(fluid.Program(), fluid.Program()): - data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float64') - data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float64') - data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float64') + data1 = paddle.static.data('data1', shape=[1, 2], dtype='float64') + data2 = paddle.static.data('data2', shape=[1, 2], dtype='float64') + data3 = 
paddle.static.data('data3', shape=[1, 2], dtype='float64') result_stack = paddle.stack([data1, data2, data3], axis=0) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load.py b/python/paddle/fluid/tests/unittests/test_static_save_load.py index e3309e18a4..d043e3785c 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load.py @@ -281,16 +281,20 @@ class TestSaveLoadBase(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -406,16 +410,20 @@ class TestSaveLoadPartial(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -544,16 +552,20 @@ class TestSaveLoadSetStateDict(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -665,16 +677,20 @@ class TestProgramStatePartial(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = 
Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -992,16 +1008,20 @@ class TestLoadFromOldInterface(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -1131,17 +1151,20 @@ class TestLoadFromOldInterface(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) - + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell ) @@ -1271,16 +1294,20 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], 
dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -1462,16 +1489,20 @@ class TestProgramStateOldSave(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -1634,16 +1665,20 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = Adam(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell @@ -1769,6 +1804,7 @@ class TestStaticSaveLoadPickle(unittest.TestCase): shape=[None, 10], dtype='float32', ) + x.desc.set_need_check_feed(False) z = paddle.static.nn.fc(x, 10, bias_attr=False) place = paddle.CPUPlace() exe = paddle.static.Executor(place) @@ -1838,6 +1874,7 @@ class TestSaveLoadInferenceModel(unittest.TestCase): main_program = framework.Program() with framework.program_guard(main_program): x = paddle.static.data(name="x", shape=[10, 10], dtype='float32') + x.desc.set_need_check_feed(False) y = x + x place = paddle.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py index 5a13133bf7..024f31fdf9 100644 --- a/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py +++ b/python/paddle/fluid/tests/unittests/test_static_save_load_bf16.py @@ -65,16 +65,20 @@ class TestSaveLoadBF16(unittest.TestCase): place = self.set_place() exe = fluid.Executor(place) sgd = SGDOptimizer(learning_rate=1e-3) - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' ) - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') - init_hidden = fluid.layers.data( - name="init_hidden", shape=[1], dtype='float32' + 
x.desc.set_need_check_feed(False) + y = paddle.static.data(name="y", shape=[-1, 1], dtype='float32') + y.desc.set_need_check_feed(False) + init_hidden = paddle.static.data( + name="init_hidden", shape=[-1, 1], dtype='float32' ) - init_cell = fluid.layers.data( - name="init_cell", shape=[1], dtype='float32' + init_hidden.desc.set_need_check_feed(False) + init_cell = paddle.static.data( + name="init_cell", shape=[-1, 1], dtype='float32' ) + init_cell.desc.set_need_check_feed(False) static_loss, static_last_hidden, static_last_cell = ptb_model( x, y, init_hidden, init_cell diff --git a/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py b/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py index 5ba3bcbbc1..9e2391233c 100644 --- a/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_static_shape_inferrence_for_shape_tensor.py @@ -21,9 +21,7 @@ from paddle.fluid.layers.utils import try_set_static_shape_tensor class StaticShapeInferrenceTest(unittest.TestCase): def test_static_graph(self): paddle.enable_static() - data = paddle.fluid.layers.data( - name="x", shape=[-1, 2], dtype='float32' - ) + data = paddle.static.data(name="x", shape=[-1, 2], dtype='float32') shape = paddle.shape(data) # shape should be [-1, 2] x = paddle.uniform(shape) try_set_static_shape_tensor(x, shape) diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index 996b0c4a33..05a2631d77 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -550,20 +550,13 @@ class TestStridedSliceAPI(unittest.TestCase): input = np.random.random([3, 4, 5, 6]).astype("float64") minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) - starts = fluid.layers.data( - name='starts', shape=[3], dtype='int32', append_batch_size=False - ) - ends = fluid.layers.data( - name='ends', shape=[3], dtype='int32', append_batch_size=False - ) - strides = fluid.layers.data( - name='strides', shape=[3], dtype='int32', append_batch_size=False - ) + starts = paddle.static.data(name='starts', shape=[3], dtype='int32') + ends = paddle.static.data(name='ends', shape=[3], dtype='int32') + strides = paddle.static.data(name='strides', shape=[3], dtype='int32') - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[3, 4, 5, 6], - append_batch_size=False, dtype="float64", ) out_1 = paddle.strided_slice( diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 22b9dc573f..6e9ff86cb8 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -23,7 +23,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers import paddle.inference as paddle_infer from paddle import enable_static from paddle.fluid.op import Operator @@ -603,9 +602,9 @@ class TestAddNDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data1 = layers.data('data1', [3, 4, 5], False, dtype) + data1 = paddle.static.data('data1', [3, 4, 5], dtype) data1.persistable = True - data2 = layers.data('data2', [3, 4, 5], False, dtype) + data2 = paddle.static.data('data2', [3, 4, 5], dtype) 
data2.persistable = True out = paddle.add_n([data1, data2]) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) @@ -645,9 +644,9 @@ class TestAddNTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data1 = layers.data('data1', [3, 4, 5], False, dtype) + data1 = paddle.static.data('data1', [3, 4, 5], dtype) data1.persistable = True - data2 = layers.data('data2', [3, 4, 5], False, dtype) + data2 = paddle.static.data('data2', [3, 4, 5], dtype) data2.persistable = True out = paddle.add_n([data1, data2]) data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype) @@ -687,7 +686,7 @@ class TestSumDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 4], False, dtype) + data = paddle.static.data('data', [2, 4], dtype) data.persistable = True out = paddle.sum(data, axis=1, keepdim=True) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -718,7 +717,7 @@ class TestSumTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 4], False, dtype) + data = paddle.static.data('data', [2, 4], dtype) data.persistable = True out = paddle.sum(data, axis=1, keepdim=True) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py index 1b8f912bb5..14cebe7e64 100644 --- a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py @@ -67,12 +67,12 @@ class TestSyncBatchNormOpTraining(unittest.TestCase): use_cudnn = self.dtype == np.float16 with fluid.unique_name.guard(): with fluid.program_guard(main, startup): - data = fluid.layers.data( + data = paddle.static.data( name='input', shape=self.dshape, dtype=self.dtype, - append_batch_size=False, ) + data.desc.set_need_check_feed(False) conv = paddle.static.nn.conv2d( input=data, num_filters=32, @@ -248,7 +248,10 @@ class TestDygraphSyncBatchNormAPIError(unittest.TestCase): # the input dtype of SyncBatchNorm must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" + ) + x2.desc.set_need_check_feed(False) self.assertRaises(TypeError, my_sync_batch_norm, x2) diff --git a/python/paddle/fluid/tests/unittests/test_tdm_child_op.py b/python/paddle/fluid/tests/unittests/test_tdm_child_op.py index 5d261dd1ef..2481a48f01 100644 --- a/python/paddle/fluid/tests/unittests/test_tdm_child_op.py +++ b/python/paddle/fluid/tests/unittests/test_tdm_child_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from op_test import OpTest +import paddle import paddle.fluid as fluid @@ -139,7 +140,9 @@ class TestCase4(TestTDMChildOp): class TestTDMChildShape(unittest.TestCase): def test_shape(self): - x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) + x = paddle.static.data( + name='x', shape=[-1, 1], dtype='int32', lod_level=1 + ) tdm_tree_info = create_tdm_tree() tree_info_np = np.array(tdm_tree_info).astype('int32') diff --git a/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py b/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py index 7a649a8c23..217d84b4b9 100644 --- a/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py +++ b/python/paddle/fluid/tests/unittests/test_tdm_sampler_op.py @@ -18,6 +18,7 @@ import unittest import 
numpy as np from op_test import OpTest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core @@ -266,7 +267,9 @@ class TestCase7(TestTDMSamplerOp): class TestTDMSamplerShape(unittest.TestCase): def test_shape(self): - x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) + x = paddle.static.data( + name='x', shape=[-1, 1], dtype='int32', lod_level=1 + ) tdm_tree_travel = create_tdm_travel() tdm_tree_layer = create_tdm_layer() layer_node_num_list = [len(i) for i in tdm_tree_layer] diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py index 419c142b6d..bc549917e8 100644 --- a/python/paddle/fluid/tests/unittests/test_tile_op.py +++ b/python/paddle/fluid/tests/unittests/test_tile_op.py @@ -21,7 +21,6 @@ from op_test import OpTest import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import Program, core, program_guard @@ -229,9 +228,9 @@ class TestTileError(unittest.TestCase): ) repeat_times = [2, 2] self.assertRaises(TypeError, paddle.tile, x1, repeat_times) - x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") + x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="uint8") self.assertRaises(TypeError, paddle.tile, x2, repeat_times) - x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") + x3 = paddle.static.data(name='x3', shape=[-1, 4], dtype="bool") x3.stop_gradient = False self.assertRaises(ValueError, paddle.tile, x3, repeat_times) @@ -240,7 +239,7 @@ class TestTileAPIStatic(unittest.TestCase): def test_api(self): with program_guard(Program(), Program()): repeat_times = [2, 2] - x1 = fluid.layers.data(name='x1', shape=[4], dtype="int32") + x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="int32") out = paddle.tile(x1, repeat_times) positive_2 = fluid.layers.fill_constant([1], dtype="int32", value=2) out2 = paddle.tile(x1, repeat_times=[positive_2, 2]) @@ -278,7 +277,7 @@ class TestTileDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [1, 2], False, dtype) + data = paddle.static.data('data', [1, 2], dtype) data.persistable = True out = paddle.tile(data, [2, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -309,7 +308,7 @@ class TestTileTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [1, 2], False, dtype) + data = paddle.static.data('data', [1, 2], dtype) data.persistable = True out = paddle.tile(data, [2, 1]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_trainable.py b/python/paddle/fluid/tests/unittests/test_trainable.py index a9f96230ff..86664d4f3b 100644 --- a/python/paddle/fluid/tests/unittests/test_trainable.py +++ b/python/paddle/fluid/tests/unittests/test_trainable.py @@ -22,8 +22,8 @@ import paddle.fluid as fluid def test_trainable(): - x = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') + x = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') + label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') feature = paddle.static.nn.fc( x, size=10, weight_attr=fluid.ParamAttr(trainable=False) ) diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index ad57289ffd..a2f922dcd8 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ 
a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++
b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -21,7 +21,6 @@ from decorator_helper import prog_scope import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers from paddle.fluid import Program, program_guard from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 @@ -289,7 +288,9 @@ class TestTransposeOpError(unittest.TestCase): def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float64') + x = paddle.static.data( + name='x', shape=[-1, 10, 5, 3], dtype='float64' + ) def test_x_Variable_check(): # the Input(x)'s type must be Variable @@ -299,8 +300,8 @@ class TestTransposeOpError(unittest.TestCase): def test_x_dtype_check(): # the Input(x)'s dtype must be one of [bool, float16, float32, float64, int32, int64] - x1 = fluid.layers.data( - name='x1', shape=[10, 5, 3], dtype='int8' + x1 = paddle.static.data( + name='x1', shape=[-1, 10, 5, 3], dtype='int8' ) paddle.transpose(x1, perm=[1, 0, 2]) @@ -520,7 +521,7 @@ class TestTransposeDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.transpose(data, [1, 0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -551,7 +552,7 @@ class TestTransposeTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.transpose(data, [1, 0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index e86dec9899..c31d763dbf 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -280,7 +280,9 @@ class TestUniformRandomOpSelectedRowsWithDiagInit( class TestUniformRandomOpApi(unittest.TestCase): def test_api(self): paddle.seed(10) - x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1) + x = paddle.static.data( + 'x', shape=[-1, 16], dtype='float32', lod_level=1 + ) y = paddle.static.nn.fc( x, size=16, @@ -467,16 +469,16 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) def test_shape(): - x1 = fluid.layers.data( - name='x2', shape=[100, 784], dtype='float32' + x1 = paddle.static.data( + name='x2', shape=[-1, 100, 784], dtype='float32' ) random.uniform_random_batch_size_like(x1, shape="shape") self.assertRaises(TypeError, test_shape) def test_dtype(): - x2 = fluid.layers.data( - name='x2', shape=[100, 784], dtype='float32' + x2 = paddle.static.data( + name='x2', shape=[-1, 100, 784], dtype='float32' ) random.uniform_random_batch_size_like(x2, 'int32') @@ -516,8 +518,8 @@ class TestUniformOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable2) def test_dtype(): - x2 = fluid.layers.data( - name='x2', shape=[100, 784], dtype='float32' + x2 = paddle.static.data( + name='x2', shape=[-1, 100, 784], dtype='float32' ) paddle.tensor.random.uniform(x2, 'int32') diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index 44fd0888f6..fdb68a2779 100755 --- 
a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -22,7 +22,6 @@ from op_test import OpTest, convert_float_to_uint16 import paddle import paddle.fluid as fluid import paddle.fluid.core as core -import paddle.fluid.layers as layers paddle.enable_static() @@ -329,7 +328,7 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.unsqueeze(data, [0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) @@ -360,7 +359,7 @@ class TestUnsqueezeTripleGradCheck(unittest.TestCase): eps = 0.005 dtype = np.float32 - data = layers.data('data', [2, 3, 4], False, dtype) + data = paddle.static.data('data', [2, 3, 4], dtype) data.persistable = True out = paddle.unsqueeze(data, [0, 2]) data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype) diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py index 8520cf9067..94c098967e 100644 --- a/python/paddle/fluid/tests/unittests/test_variable.py +++ b/python/paddle/fluid/tests/unittests/test_variable.py @@ -168,7 +168,7 @@ class TestVariable(unittest.TestCase): var14 = var[1:-1, 0:2, ::-1] var15 = var[::-1, ::-1, ::-1] - x = fluid.layers.data(name='x', shape=[13], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.nn.fc(x, size=1, activation=None) y_1 = y[:, 0] feeder = fluid.DataFeeder(place=place, feed_list=[x]) diff --git a/python/paddle/fluid/tests/unittests/test_weight_decay.py b/python/paddle/fluid/tests/unittests/test_weight_decay.py index e125b5876f..9a36be6173 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_decay.py +++ b/python/paddle/fluid/tests/unittests/test_weight_decay.py @@ -147,10 +147,12 @@ class TestWeightDecay(unittest.TestCase): startup_prog = fluid.framework.Program() startup_prog.random_seed = 1 with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1 + data = paddle.static.data( + name="words", shape=[-1, 1], dtype="int64", lod_level=1 + ) + label = paddle.static.data( + name="label", shape=[-1, 1], dtype="int64" ) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") avg_cost = model(data, label, len(self.word_dict)) param_list = [ diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py index e57e3ef9f0..17a05bdb01 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py +++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py @@ -35,8 +35,8 @@ class TestWeightNormalization(unittest.TestCase): @classmethod def set_program(cls): - data = fluid.layers.data( - name=cls.data_desc[0][0], shape=cls.data_desc[0][1] + data = paddle.static.data( + name=cls.data_desc[0][0], shape=[-1] + cls.data_desc[0][1] ) out = paddle.static.nn.fc( x=data, diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index 34540f64e2..32ee83614b 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -78,17 +78,22 @@ class TestWhereAPI(unittest.TestCase): for x_stop_gradient in [False, True]: for y_stop_gradient 
in [False, True]: with fluid.program_guard(Program(), Program()): - cond = fluid.layers.data( - name='cond', shape=self.shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + self.shape, dtype='bool' ) - x = fluid.layers.data( - name='x', shape=self.shape, dtype='float32' + cond.desc.set_need_check_feed(False) + x = paddle.static.data( + name='x', shape=[-1] + self.shape, dtype='float32' ) - y = fluid.layers.data( - name='y', shape=self.shape, dtype='float32' + x.desc.set_need_check_feed(False) + y = paddle.static.data( + name='y', shape=[-1] + self.shape, dtype='float32' ) + y.desc.set_need_check_feed(False) x.stop_gradient = x_stop_gradient + x.desc.set_need_check_feed(False) y.stop_gradient = y_stop_gradient + y.desc.set_need_check_feed(False) result = paddle.where(cond, x, y) append_backward(paddle.mean(result)) for use_cuda in [False, True]: @@ -127,8 +132,10 @@ class TestWhereAPI(unittest.TestCase): def test_api_broadcast(self, use_cuda=False): main_program = Program() with fluid.program_guard(main_program): - x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') - y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') + x.desc.set_need_check_feed(False) + y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') + y.desc.set_need_check_feed(False) x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype('float32') y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( 'float32' @@ -151,9 +158,10 @@ class TestWhereAPI(unittest.TestCase): main_program = Program() with fluid.program_guard(main_program): cond_shape = [2, 4] - cond = fluid.layers.data( - name='cond', shape=cond_shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + cond_shape, dtype='bool' ) + cond.desc.set_need_check_feed(False) x_data = 1.0 y_data = 2.0 cond_data = np.array([False, False, True, True]).astype('bool') @@ -175,11 +183,18 @@ class TestWhereAPI(unittest.TestCase): paddle.enable_static() main_program = Program() with fluid.program_guard(main_program): - cond = fluid.layers.data( - name='cond', shape=cond_shape, dtype='bool' + cond = paddle.static.data( + name='cond', shape=[-1] + cond_shape, dtype='bool' ) - x = fluid.layers.data(name='x', shape=x_shape, dtype='float32') - y = fluid.layers.data(name='y', shape=y_shape, dtype='float32') + x = paddle.static.data( + name='x', shape=[-1] + x_shape, dtype='float32' + ) + y = paddle.static.data( + name='y', shape=[-1] + y_shape, dtype='float32' + ) + x.desc.set_need_check_feed(False) + y.desc.set_need_check_feed(False) + cond.desc.set_need_check_feed(False) cond_data_tmp = np.random.random(size=cond_shape).astype('float32') cond_data = cond_data_tmp < 0.3 x_data = np.random.random(size=x_shape).astype('float32') @@ -330,7 +345,8 @@ class TestWhereDygraphAPI(unittest.TestCase): def test_where_condition(self): data = np.array([[True, False], [False, True]]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[(-1), 2]) + x = paddle.static.data(name='x', shape=[(-1), 2], dtype='float32') + x.desc.set_need_check_feed(False) y = paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 2) @@ -343,7 +359,8 @@ class TestWhereDygraphAPI(unittest.TestCase): np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05) data = np.array([True, True, False]) with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[(-1)]) + x = paddle.static.data(name='x', 
shape=[(-1)], dtype='float32') + x.desc.set_need_check_feed(False) y = paddle.where(x) self.assertEqual(type(y), tuple) self.assertEqual(len(y), 1) @@ -369,9 +386,14 @@ class TestWhereOpError(unittest.TestCase): self.assertRaises(TypeError, test_Variable) def test_type(): - x = fluid.layers.data(name='x', shape=[4], dtype='bool') - y = fluid.layers.data(name='y', shape=[4], dtype='float16') - cond = fluid.layers.data(name='cond', shape=[4], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 4], dtype='bool') + x.desc.set_need_check_feed(False) + y = paddle.static.data(name='y', shape=[-1, 4], dtype='float16') + y.desc.set_need_check_feed(False) + cond = paddle.static.data( + name='cond', shape=[-1, 4], dtype='int32' + ) + cond.desc.set_need_check_feed(False) paddle.where(cond, x, y) self.assertRaises(TypeError, test_type) diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index c5c31ac571..c0a5b6a8cb 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -608,7 +608,7 @@ class TestApiWhileLoopSliceInBody(unittest.TestCase): main_program = Program() startup_program = Program() with program_guard(main_program, startup_program): - x = fluid.layers.data(name='x', shape=[5], dtype='int32') + x = paddle.static.data(name='x', shape=[-1, 5], dtype='int32') z = fluid.layers.fill_constant([1], 'int32', 0) x_shape = paddle.shape(x) i = fluid.layers.fill_constant([1], 'int32', 0) diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index 64b1ad2125..a78dbe6b7e 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -28,15 +28,9 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): def simple_net(self): - d0 = layers.data( - "d0", shape=[10], append_batch_size=False, dtype='float32' - ) - d1 = layers.data( - "d1", shape=[10], append_batch_size=False, dtype='float32' - ) - d2 = layers.data( - "d2", shape=[10], append_batch_size=False, dtype='float32' - ) + d0 = paddle.static.data("d0", shape=[10], dtype='float32') + d1 = paddle.static.data("d1", shape=[10], dtype='float32') + d2 = paddle.static.data("d2", shape=[10], dtype='float32') i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True init = layers.zeros(shape=[10], dtype='float32') @@ -151,8 +145,10 @@ class TestIgnoreVarNameInWhile(unittest.TestCase): i = i + 1 return [i, ten, batch_info, origin_seq] - x = fluid.layers.data(name='x', shape=[-1, 1, 4]) - y = fluid.layers.data(name='y', shape=[-1, 1, 1]) + x = paddle.static.data(name='x', shape=[-1, 1, 4], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 1, 1], dtype='float32') + x.desc.set_need_check_feed(False) + y.desc.set_need_check_feed(False) temp = layers.concat(input=[x, y], axis=-1) i = layers.fill_constant(shape=[1], value=0, dtype='int32') num = layers.fill_constant(shape=[1], value=5, dtype='int32') @@ -207,7 +203,7 @@ class TestOutputsMustExistsInputs(unittest.TestCase): return s paddle.enable_static() - x = paddle.static.data(shape=[-1], name='x') + x = paddle.static.data(shape=[-1], name='x', dtype='float32') func(x) for op in main_program.block(0).ops: if op.type == "while": diff --git a/python/paddle/fluid/tests/unittests/transformer_model.py b/python/paddle/fluid/tests/unittests/transformer_model.py index f4f755c131..42436b6e24 100644 --- 
a/python/paddle/fluid/tests/unittests/transformer_model.py +++ b/python/paddle/fluid/tests/unittests/transformer_model.py @@ -499,11 +499,13 @@ def build_inputs(max_length, n_head): all_inputs = [] for name, shape, dtype in zip(names, shapes, dtypes): - all_inputs.append( - fluid.layers.data( - name=name, shape=shape, dtype=dtype, append_batch_size=False - ) + data_input = paddle.static.data( + name=name, + shape=shape, + dtype=dtype, ) + data_input.desc.set_need_check_feed(False) + all_inputs.append(data_input) return all_inputs diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py index 55304b8b40..5cd5a92f4b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_allgather_op_xpu.py @@ -16,7 +16,6 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,8 +29,8 @@ class TestCollectiveAllGather(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) toutdata = main_prog.current_block().create_var( name="outofgather", diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py index 32196b77d2..54b60f7666 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_allreduce_op_xpu.py @@ -18,7 +18,6 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -31,8 +30,8 @@ class TestCollectiveAllReduce(TestCollectiveRunnerBase): def get_model(self, main_prog, startup_program): ring_id = 0 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) toutdata = main_prog.current_block().create_var( name="outofreduce", diff --git a/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py index 5a17539cbf..8fea9d7a4a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/collective_identity_op_xpu.py @@ -16,7 +16,6 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle import paddle.fluid as fluid -import paddle.fluid.layers as layers from paddle.fluid import core paddle.enable_static() @@ -30,8 +29,8 @@ class TestCollectiveIdentity(TestCollectiveRunnerBase): ring_id = 0 nranks = 2 with fluid.program_guard(main_prog, startup_program): - tindata = layers.data( - name="tindata", shape=[10, 1000], dtype='float32' + tindata = paddle.static.data( + name="tindata", shape=[-1, 10, 1000], dtype='float32' ) toutdata = main_prog.current_block().create_var( name="outofgather", diff --git a/python/paddle/fluid/tests/unittests/xpu/test_adadelta_op_xpu.py 
b/python/paddle/fluid/tests/unittests/xpu/test_adadelta_op_xpu.py index f503e3cd4f..4de3672850 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_adadelta_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_adadelta_op_xpu.py @@ -168,8 +168,12 @@ class XPUTestAdadelta(XPUOpTestWrapper): place = fluid.XPUPlace(0) main = fluid.Program() with fluid.program_guard(main): - x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype) - y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype) + x = paddle.static.data( + name='x', shape=[-1, 13], dtype=self.dtype + ) + y = paddle.static.data( + name='y', shape=[-1, 1], dtype=self.dtype + ) y_predict = paddle.static.nn.fc(x, size=1, activation=None) cost = paddle.nn.functional.square_error_cost( input=y_predict, label=y diff --git a/python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py index 915fb24951..1367b972dd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py @@ -22,7 +22,6 @@ from op_test_xpu import XPUOpTest from xpu.get_test_cover_info import XPUOpTestWrapper, create_test_class import paddle -import paddle.fluid as fluid def conv3d_forward_naive( @@ -567,17 +566,15 @@ class XPUTestConv3DOp_v2(XPUOpTestWrapper): class TestConv3DAPI(unittest.TestCase): def test_api(self): - input_NDHWC = fluid.layers.data( + input_NDHWC = paddle.static.data( name="input_NDHWC", shape=[2, 5, 5, 5, 3], - append_batch_size=False, dtype="float32", ) - input_NCDHW = fluid.layers.data( + input_NCDHW = paddle.static.data( name="input_NCDHW", shape=[2, 3, 5, 5, 3], - append_batch_size=False, dtype="float32", ) @@ -650,10 +647,9 @@ class TestConv3DAPI(unittest.TestCase): class TestConv3DAPI_Error(unittest.TestCase): def test_api(self): - input = fluid.layers.data( + input = paddle.static.data( name="input", shape=[2, 5, 5, 5, 4], - append_batch_size=False, dtype="float32", ) @@ -736,10 +732,9 @@ class TestConv3DAPI_Error(unittest.TestCase): self.assertRaises(ValueError, run_5) # ValueError: channel dimmention - x = fluid.layers.data( + x = paddle.static.data( name="x", shape=[2, 5, 5, 5, -1], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py index 011dd8fb9d..fca45412fa 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py @@ -141,8 +141,8 @@ class XPUTestDropoutOp(XPUOpTestWrapper): def test_dtype(): # the input dtype of dropout must be float16 or float32 or float64 # float16 only can be set on GPU place - x2 = fluid.layers.data( - name='x2', shape=[3, 4, 5, 6], dtype="int32" + x2 = paddle.static.data( + name='x2', shape=[-1, 3, 4, 5, 6], dtype="int32" ) paddle.nn.functional.dropout(x2, p=0.5) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py index 9f7d39ca34..7302e4ebe7 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_expand_as_v2_op_xpu.py @@ -134,14 +134,11 @@ class TestExpandAsV2API(unittest.TestCase): def test_api(self): input1 = np.random.random([12, 14]).astype("float32") input2 = np.random.random([2, 12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 
14], append_batch_size=False, dtype="float32" - ) + x = paddle.static.data(name='x', shape=[-1, 12, 14], dtype="float32") - y = fluid.layers.data( + y = paddle.static.data( name='target_tensor', shape=[2, 12, 14], - append_batch_size=False, dtype="float32", ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py index 07039053dc..558b177d84 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_expand_v2_op_xpu.py @@ -197,18 +197,16 @@ class TestExpandV2API(unittest.TestCase): def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( + x = paddle.static.data( name='x', - shape=[12, 14], - append_batch_size=False, + shape=[-1, 12, 14], dtype="float32", ) positive_2 = fluid.layers.fill_constant([1], "int32", 12) - expand_shape = fluid.layers.data( + expand_shape = paddle.static.data( name="expand_shape", - shape=[2], - append_batch_size=False, + shape=[-1, 2], dtype="int32", ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py index 85818e5a6a..eb16430e39 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_index_select_op_xpu.py @@ -102,10 +102,8 @@ class TestIndexSelectAPI(unittest.TestCase): # case 1: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data( - name='index', shape=[3], dtype='int32', append_batch_size=False - ) + x = paddle.static.data(name='x', shape=[-1, 4]) + index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index, axis=1) exe = fluid.Executor(fluid.XPUPlace(0)) (res,) = exe.run( @@ -120,10 +118,8 @@ class TestIndexSelectAPI(unittest.TestCase): # case 2: with program_guard(Program(), Program()): - x = fluid.layers.data(name='x', shape=[-1, 4]) - index = fluid.layers.data( - name='index', shape=[3], dtype='int32', append_batch_size=False - ) + x = paddle.static.data(name='x', shape=[-1, 4]) + index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index) exe = fluid.Executor(fluid.XPUPlace(0)) (res,) = exe.run( diff --git a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py index f0db43acb6..94645bcf9b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_lookup_table_v2_op_xpu.py @@ -167,7 +167,7 @@ class TestLookupTableWithTensorIdsWIsSelectedRows( class TestLookupTableApi(unittest.TestCase): def test_api(self): - x = fluid.layers.data(name='x', shape=[20], dtype='int64') + x = paddle.static.data(name='x', shape=[-1, 20], dtype='int64') emb = fluid.embedding(input=x, size=[128, 64]) place = paddle.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py index 3991862346..a13bea88b6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py @@ -17,11 +17,11 @@ import unittest import numpy as np +import paddle + sys.path.append("..") from op_test_xpu 
import XPUOpTest -import paddle -import paddle.fluid as fluid from paddle.fluid import Program, program_guard np.random.seed(10) @@ -91,12 +91,12 @@ class TestMeanOpError(unittest.TestCase): input1 = 12 self.assertRaises(TypeError, paddle.mean, input1) # The input dtype of mean_op must be float16, float32, float64. - input2 = fluid.layers.data( - name='input2', shape=[12, 10], dtype="int32" + input2 = paddle.static.data( + name='input2', shape=[-1, 12, 10], dtype="int32" ) self.assertRaises(TypeError, paddle.mean, input2) - input3 = fluid.layers.data( - name='input3', shape=[4], dtype="float16" + input3 = paddle.static.data( + name='input3', shape=[-1, 4], dtype="float16" ) paddle.nn.functional.softmax(input3) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py index dce5e263b9..0a5752989e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_one_hot_v2_op_xpu.py @@ -167,7 +167,7 @@ class TestOneHotOpApi(unittest.TestCase): ) def _run(self, depth): - label = fluid.layers.data(name="label", shape=[1], dtype="int64") + label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=depth) place = fluid.XPUPlace(0) @@ -191,10 +191,9 @@ class BadInputTestOnehotV2(unittest.TestCase): with fluid.program_guard(fluid.Program()): def test_bad_x(): - label = fluid.layers.data( + label = paddle.static.data( name="label", shape=[4], - append_batch_size=False, dtype="float32", ) one_hot_label = fluid.one_hot(input=label, depth=4) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py index c3e49d1232..6c2fd3fc9f 100755 --- a/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py @@ -433,7 +433,7 @@ class TestSeqConvApi(unittest.TestCase): def test_api(self): import paddle.fluid as fluid - x = fluid.layers.data('x', shape=[32], lod_level=1) + x = paddle.static.data('x', shape=[-1, 32], lod_level=1) y = fluid.layers.sequence_conv( input=x, num_filters=2, filter_size=3, padding_start=None ) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py index e2aa831263..c70f6f61f1 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_index_xpu.py @@ -101,7 +101,7 @@ for stype in support_types: class TestWhereOpError(unittest.TestCase): def test_api(self): with program_guard(Program(), Program()): - cond = fluid.layers.data(name='cond', shape=[4], dtype='bool') + cond = paddle.static.data(name='cond', shape=[-1, 4], dtype='bool') result = paddle.nonzero(cond) exe = fluid.Executor(paddle.XPUPlace(0)) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py index 4a080ba70c..45aa192d72 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py @@ -150,8 +150,8 @@ class TestXPUWhereAPI(unittest.TestCase): train_prog = fluid.Program() startup = fluid.Program() with fluid.program_guard(train_prog, startup): - x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32') - y = 
fluid.layers.data(name='y', shape=[4, 2], dtype='float32') + x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') + y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") y_i = np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]).astype( "float32" diff --git a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py index 10e3a6fddc..56b93e4945 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py @@ -27,15 +27,9 @@ paddle.enable_static() class TestWhileOp(unittest.TestCase): def simple_net(self): - d0 = layers.data( - "d0", shape=[10], append_batch_size=False, dtype='float32' - ) - d1 = layers.data( - "d1", shape=[10], append_batch_size=False, dtype='float32' - ) - d2 = layers.data( - "d2", shape=[10], append_batch_size=False, dtype='float32' - ) + d0 = paddle.static.data("d0", shape=[10], dtype='float32') + d1 = paddle.static.data("d1", shape=[10], dtype='float32') + d2 = paddle.static.data("d2", shape=[10], dtype='float32') i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True init = layers.zeros(shape=[10], dtype='float32') diff --git a/tools/codestyle/test_docstring_checker.py b/tools/codestyle/test_docstring_checker.py index 8e18fd0703..ddc0ed185d 100644 --- a/tools/codestyle/test_docstring_checker.py +++ b/tools/codestyle/test_docstring_checker.py @@ -218,7 +218,7 @@ def fc(input, ValueError: If rank of the input tensor is less than 2. Examples: .. code-block:: python - data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + data = paddle.static.data(name="data", shape=[-1, 32, 32], dtype="float32") fc = paddle.static.nn.fc(x=data, size=1000, activation="tanh") """ raise ValueError('A very specific bad thing happened.') diff --git a/tools/infrt/fake_models/multi_fc.py b/tools/infrt/fake_models/multi_fc.py index ded9f67bd3..8e3987a352 100644 --- a/tools/infrt/fake_models/multi_fc.py +++ b/tools/infrt/fake_models/multi_fc.py @@ -21,8 +21,8 @@ size = 2 num_layers = 4 paddle.enable_static() -a = fluid.layers.data(name="A", shape=[-1, size], dtype='float32') -label = fluid.layers.data(name="label", shape=[size], dtype='float32') +a = paddle.static.data(name="A", shape=[-1, size], dtype='float32') +label = paddle.static.data(name="label", shape=[-1, size], dtype='float32') fc_out = paddle.static.nn.fc( x=a, -- GitLab
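Note on the migration pattern applied throughout this patch: fluid.layers.data prepended an implicit batch dimension (its append_batch_size flag defaulted to True), while paddle.static.data takes the declared shape literally. That is why nearly every migrated call gains a leading -1. A minimal sketch of the equivalence, with illustrative names and sizes:

    import paddle

    paddle.enable_static()

    # Before (removed API): shape=[784] silently became [-1, 784].
    #     x = fluid.layers.data(name='x', shape=[784], dtype='float32')
    # After: the variable batch dimension is written out explicitly.
    x = paddle.static.data(name='x', shape=[-1, 784], dtype='float32')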
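Calls that already passed append_batch_size=False carried the full shape, so they migrate with the shape untouched; only the positional arguments shift, since the old form was layers.data(name, shape, append_batch_size, dtype) and the new one is paddle.static.data(name, shape, dtype, lod_level). A sketch of the positional form used by the gradient-check tests above:

    import numpy as np
    import paddle

    paddle.enable_static()

    dtype = np.float32
    # Before:  data = layers.data('data', [3, 4, 5], False, dtype)
    data = paddle.static.data('data', [3, 4, 5], dtype)
    data.persistable = True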
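paddle.static.data is stricter than the API it replaces: the executor validates each fed tensor's shape and dtype against the declared variable. Tests above that deliberately feed tensors whose non-batch dimensions differ from the declaration therefore switch the check off on the underlying VarDesc (a few hunks in test_where_op.py set it twice for the same variable, which is harmless but redundant). This looks like test-only plumbing rather than public API:

    import paddle

    paddle.enable_static()

    x = paddle.static.data(name='x', shape=[-1, 1], dtype='float32')
    # Disable the feed-time shape/dtype check for this variable, restoring
    # the permissive behaviour of the removed fluid.layers.data.
    x.desc.set_need_check_feed(False)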
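Two smaller details the patch also normalizes: fluid.layers.data defaulted dtype to 'float32', so migrated declarations now spell the dtype out explicitly, and lod_level is accepted by both APIs and carries over unchanged:

    import paddle

    paddle.enable_static()

    # dtype was implicit before; it is explicit after the migration.
    cond = paddle.static.data(name='cond', shape=[-1, 4], dtype='bool')
    # lod_level migrates as-is for LoD (variable-length) inputs.
    words = paddle.static.data(
        name='words', shape=[-1, 1], dtype='int64', lod_level=1
    )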
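For completeness, a small end-to-end sketch in the style of the migrated tests, showing that a -1 batch dimension accepts any batch size at feed time; the network and sizes here are illustrative, not taken from the patch:

    import numpy as np
    import paddle

    paddle.enable_static()

    x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32')
    y_predict = paddle.static.nn.fc(x, size=1, activation=None)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())
    # Feed a batch of 4 even though the declared batch dimension is -1.
    (out,) = exe.run(
        feed={'x': np.random.rand(4, 13).astype('float32')},
        fetch_list=[y_predict],
    )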