From 8757841e911f1ecc5b327cbba4466af2303795fd Mon Sep 17 00:00:00 2001 From: chentianyu03 Date: Fri, 21 Aug 2020 01:25:28 -0500 Subject: [PATCH] Modify alias (#2441) * modify alias * update alias mapping --- doc/paddle/api/alias_api_mapping | 1119 ++++++----------- doc/paddle/api/gen_doc.py | 18 +- .../paddle/{framework => }/CosineDecay_cn.rst | 0 .../{framework => }/ExponentialDecay_cn.rst | 0 .../{framework => }/InverseTimeDecay_cn.rst | 0 .../{framework => }/NaturalExpDecay_cn.rst | 0 .../paddle/{framework => }/NoamDecay_cn.rst | 0 .../paddle/{framework => }/ParallelEnv_cn.rst | 0 .../paddle/{framework => }/ParamAttr_cn.rst | 0 .../{framework => }/PiecewiseDecay_cn.rst | 0 .../{framework => }/PolynomialDecay_cn.rst | 0 .../paddle/{framework => }/Variable_cn.rst | 0 .../api/paddle/{fluid/layers => }/abs_cn.rst | 0 .../api/paddle/{fluid/layers => }/acos_cn.rst | 0 .../paddle/{fluid/layers => }/argmin_cn.rst | 0 .../api/paddle/{fluid/layers => }/asin_cn.rst | 0 doc/paddle/api/paddle/atan_cn.rst | 40 + .../api/paddle/{fluid/layers => }/cast_cn.rst | 0 .../api/paddle/{fluid/layers => }/ceil_cn.rst | 0 .../api/paddle/{fluid/layers => }/cos_cn.rst | 0 .../{framework => }/create_global_var_cn.rst | 0 .../{framework => }/create_parameter_cn.rst | 0 .../{fluid/layers => }/create_tensor_cn.rst | 0 .../{fluid/layers => }/crop_tensor_cn.rst | 0 .../api/paddle/dataset/common/split_cn.rst | 51 +- .../api/paddle/{fluid/layers => }/diag_cn.rst | 0 .../fleet/DatasetFactory_cn.rst | 0 .../fleet/InMemoryDataset_cn.rst | 0 .../fleet/QueueDataset_cn.rst | 0 .../meta_optimizers/LambOptimizer_cn.rst | 0 .../meta_optimizers/RecomputeOptimizer_cn.rst | 0 .../{fluid/layers => }/elementwise_add_cn.rst | 0 .../{fluid/layers => }/elementwise_div_cn.rst | 0 .../layers => }/elementwise_floordiv_cn.rst | 0 .../{fluid/layers => }/elementwise_mod_cn.rst | 0 .../{fluid/layers => }/elementwise_pow_cn.rst | 0 .../{fluid/layers => }/elementwise_sub_cn.rst | 0 .../api/paddle/{fluid/layers => 
}/erf_cn.rst | 0 .../api/paddle/{fluid/layers => }/exp_cn.rst | 0 .../{fluid/layers => }/expand_as_cn.rst | 0 .../paddle/{fluid/layers => }/expand_cn.rst | 0 .../{fluid/layers => }/fill_constant_cn.rst | 0 .../paddle/{fluid/layers => }/floor_cn.rst | 0 .../api/paddle/fluid/BuildStrategy_cn.rst | 198 --- doc/paddle/api/paddle/fluid/CPUPlace_cn.rst | 20 + .../api/paddle/fluid/CUDAPinnedPlace_cn.rst | 20 + doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst | 33 + .../api/paddle/fluid/CompiledProgram_cn.rst | 114 -- .../api/paddle/fluid/ExecutionStrategy_cn.rst | 65 - doc/paddle/api/paddle/fluid/Executor_cn.rst | 272 ---- .../api/paddle/fluid/ParallelExecutor_cn.rst | 196 --- doc/paddle/api/paddle/fluid/Program_cn.rst | 450 ------- .../paddle/fluid/WeightNormParamAttr_cn.rst | 49 - .../paddle/fluid/default_main_program_cn.rst | 59 - .../fluid/default_startup_program_cn.rst | 45 - .../fluid/dygraph/base/disable_dygraph_cn.rst | 22 - .../fluid/dygraph/base/enable_dygraph_cn.rst | 25 - .../fluid/framework/in_dygraph_mode_cn.rst | 29 - .../api/paddle/fluid/global_scope_cn.rst | 28 - doc/paddle/api/paddle/fluid/gradients_cn.rst | 39 - .../paddle/fluid/initializer/Normal_cn.rst | 115 -- .../paddle/fluid/initializer/Uniform_cn.rst | 106 -- doc/paddle/api/paddle/fluid/io/load_cn.rst | 168 --- .../api/paddle/fluid/layers/atan_cn.rst | 38 - .../api/paddle/fluid/layers/cumsum_cn.rst | 41 - .../fluid/layers/elementwise_max_cn.rst | 109 -- .../fluid/layers/elementwise_min_cn.rst | 106 -- .../api/paddle/fluid/layers/flatten_cn.rst | 62 - .../paddle/fluid/layers/hard_shrink_cn.rst | 46 - .../fluid/layers/margin_rank_loss_cn.rst | 55 - .../api/paddle/fluid/layers/mean_cn.rst | 52 - doc/paddle/api/paddle/fluid/name_scope_cn.rst | 53 - .../api/paddle/fluid/program_guard_cn.rst | 43 - doc/paddle/api/paddle/fluid/save_cn.rst | 80 -- .../api/paddle/fluid/scope_guard_cn.rst | 36 - .../paddle/framework/get_default_dtype_cn.rst | 3 + doc/paddle/api/paddle/framework/load_cn.rst | 168 --- 
doc/paddle/api/paddle/framework/save_cn.rst | 80 -- .../paddle/framework/set_default_dtype_cn.rst | 3 + .../{fluid/layers => }/gather_nd_cn.rst | 0 .../api/paddle/{framework => }/grad_cn.rst | 0 .../paddle/{fluid/layers => }/has_inf_cn.rst | 0 .../paddle/{fluid/layers => }/has_nan_cn.rst | 0 .../{fluid/layers => }/increment_cn.rst | 0 .../paddle/{fluid/layers => }/is_empty_cn.rst | 0 .../paddle/{fluid/layers => }/isfinite_cn.rst | 0 doc/paddle/api/paddle/jit/load_cn.rst | 219 +--- doc/paddle/api/paddle/jit/save_cn.rst | 103 +- .../paddle/{fluid/layers => }/linspace_cn.rst | 25 +- doc/paddle/api/paddle/load_cn.rst | 55 + .../api/paddle/{fluid/layers => }/log_cn.rst | 0 .../{fluid/layers => }/logical_and_cn.rst | 0 .../{fluid/layers => }/logical_not_cn.rst | 0 .../{fluid/layers => }/logical_or_cn.rst | 0 .../{fluid/layers => }/logical_xor_cn.rst | 0 .../{fluid/layers => }/multiplex_cn.rst | 0 .../{fluid/dygraph => nn}/BatchNorm_cn.rst | 0 .../BilinearTensorProduct_cn.rst | 0 .../{fluid/dygraph => nn}/Embedding_cn.rst | 0 .../GradientClipByGlobalNorm_cn.rst | 0 .../clip => nn}/GradientClipByNorm_cn.rst | 0 .../clip => nn}/GradientClipByValue_cn.rst | 0 .../{fluid/dygraph => nn}/GroupNorm_cn.rst | 0 .../dygraph/container => nn}/LayerList_cn.rst | 0 .../{fluid/dygraph => nn}/LayerNorm_cn.rst | 0 .../{fluid/dygraph/layers => nn}/Layer_cn.rst | 0 .../{fluid/dygraph => nn}/Linear_cn.rst | 0 .../container => nn}/ParameterList_cn.rst | 0 .../{fluid/dygraph => nn}/Pool2D_cn.rst | 0 .../container => nn}/Sequential_cn.rst | 0 .../{fluid/dygraph => nn}/SpectralNorm_cn.rst | 0 .../{fluid/layers => nn}/beam_search_cn.rst | 0 .../layers => nn}/beam_search_decode_cn.rst | 0 .../paddle/{fluid/layers => nn}/case_cn.rst | 0 .../{fluid/layers => nn}/clip_by_norm_cn.rst | 0 .../paddle/{fluid/layers => nn}/clip_cn.rst | 0 .../paddle/{fluid/layers => nn}/cond_cn.rst | 0 .../api/paddle/{fluid => nn}/data_cn.rst | 0 .../functional/activation}/softmax_cn.rst | 0 
.../functional}/adaptive_pool2d_cn.rst | 0 .../functional}/adaptive_pool3d_cn.rst | 0 .../functional}/add_position_encoding_cn.rst | 0 .../functional}/affine_channel_cn.rst | 0 .../functional}/affine_grid_cn.rst | 0 .../functional}/anchor_generator_cn.rst | 0 .../layers => nn/functional}/assign_cn.rst | 0 .../functional}/bipartite_match_cn.rst | 0 .../layers => nn/functional}/box_clip_cn.rst | 0 .../layers => nn/functional}/box_coder_cn.rst | 0 .../functional}/box_decoder_and_assign_cn.rst | 0 .../layers => nn/functional}/bpr_loss_cn.rst | 0 .../layers => nn/functional}/brelu_cn.rst | 0 .../functional}/center_loss_cn.rst | 0 .../functional}/collect_fpn_proposals_cn.rst | 0 .../functional}/continuous_value_model_cn.rst | 0 .../functional}/cosine_decay_cn.rst | 0 .../functional}/cross_entropy_cn.rst | 0 .../functional}/deformable_roi_pooling_cn.rst | 0 .../functional}/density_prior_box_cn.rst | 0 .../functional}/detection_output_cn.rst | 0 .../layers => nn/functional}/dice_loss_cn.rst | 0 .../distribute_fpn_proposals_cn.rst | 0 .../layers => nn/functional}/dropout_cn.rst | 0 .../functional}/edit_distance_cn.rst | 0 .../layers => nn/functional}/elu_cn.rst | 0 .../functional}/exponential_decay_cn.rst | 0 .../functional}/filter_by_instag_cn.rst | 0 .../functional}/fsp_matrix_cn.rst | 0 .../layers => nn/functional}/gelu_cn.rst | 0 .../functional}/generate_mask_labels_cn.rst | 0 .../generate_proposal_labels_cn.rst | 0 .../functional}/generate_proposals_cn.rst | 0 .../functional}/grid_sampler_cn.rst | 0 .../functional}/hard_sigmoid_cn.rst | 0 .../functional}/hard_swish_cn.rst | 0 .../layers => nn/functional}/hash_cn.rst | 0 .../functional}/huber_loss_cn.rst | 0 .../functional}/image_resize_cn.rst | 0 .../functional}/image_resize_short_cn.rst | 0 .../functional}/inverse_time_decay_cn.rst | 0 .../functional}/iou_similarity_cn.rst | 0 .../functional}/kldiv_loss_cn.rst | 0 .../functional}/l2_normalize_cn.rst | 0 .../functional}/label_smooth_cn.rst | 0 
.../functional}/leaky_relu_cn.rst | 0 .../functional}/linear_lr_warmup_cn.rst | 0 .../layers => nn/functional}/log_loss_cn.rst | 0 .../functional}/logsigmoid_cn.rst | 0 .../functional/loss}/mse_loss_cn.rst | 0 .../layers => nn/functional}/lrn_cn.rst | 0 .../layers => nn/functional}/maxout_cn.rst | 0 .../api/paddle/nn/functional/mse_loss_cn.rst | 40 + .../functional}/multiclass_nms_cn.rst | 0 .../functional}/natural_exp_decay_cn.rst | 0 .../functional}/noam_decay_cn.rst | 0 .../functional}/npair_loss_cn.rst | 0 .../{fluid => nn/functional}/one_hot_cn.rst | 31 +- .../layers => nn/functional}/pad2d_cn.rst | 0 .../layers => nn/functional}/pad_cn.rst | 0 .../functional}/pad_constant_like_cn.rst | 0 .../functional}/piecewise_decay_cn.rst | 0 .../functional}/pixel_shuffle_cn.rst | 0 .../functional}/polygon_box_transform_cn.rst | 0 .../functional}/polynomial_decay_cn.rst | 0 .../layers => nn/functional}/pool2d_cn.rst | 0 .../layers => nn/functional}/pool3d_cn.rst | 0 .../layers => nn/functional}/prior_box_cn.rst | 0 .../functional}/prroi_pool_cn.rst | 0 .../functional}/psroi_pool_cn.rst | 0 .../functional}/random_crop_cn.rst | 0 .../layers => nn/functional}/rank_loss_cn.rst | 0 .../layers => nn/functional}/relu6_cn.rst | 0 .../functional}/resize_bilinear_cn.rst | 0 .../functional}/resize_nearest_cn.rst | 0 .../functional}/resize_trilinear_cn.rst | 0 .../retinanet_detection_output_cn.rst | 0 .../retinanet_target_assign_cn.rst | 0 .../layers => nn/functional}/roi_align_cn.rst | 0 .../roi_perspective_transform_cn.rst | 0 .../layers => nn/functional}/roi_pool_cn.rst | 0 .../functional}/rpn_target_assign_cn.rst | 0 .../sampled_softmax_with_cross_entropy_cn.rst | 0 .../layers => nn/functional}/selu_cn.rst | 0 .../functional}/shuffle_channel_cn.rst | 0 .../sigmoid_cross_entropy_with_logits_cn.rst | 0 .../functional}/sigmoid_focal_loss_cn.rst | 0 .../functional}/similarity_focus_cn.rst | 0 .../layers => nn/functional}/smooth_l1_cn.rst | 0 .../layers => 
nn/functional}/soft_relu_cn.rst | 0 .../api/paddle/nn/functional/softmax_cn.rst | 118 -- .../softmax_with_cross_entropy_cn.rst | 0 .../layers => nn/functional}/softplus_cn.rst | 0 .../functional}/softshrink_cn.rst | 0 .../layers => nn/functional}/softsign_cn.rst | 0 .../functional}/space_to_depth_cn.rst | 0 .../functional}/square_error_cost_cn.rst | 0 .../layers => nn/functional}/ssd_loss_cn.rst | 0 .../layers => nn/functional}/swish_cn.rst | 0 .../functional}/tanh_shrink_cn.rst | 0 .../functional}/target_assign_cn.rst | 0 .../teacher_student_sigmoid_loss_cn.rst | 0 .../functional}/temporal_shift_cn.rst | 0 .../functional}/thresholded_relu_cn.rst | 0 .../layers => nn/functional}/unfold_cn.rst | 0 .../layers => nn/functional}/warpctc_cn.rst | 0 .../layers => nn/functional}/yolo_box_cn.rst | 0 .../functional}/yolov3_loss_cn.rst | 0 .../{fluid/layers => nn}/gather_tree_cn.rst | 0 .../{fluid => nn}/initializer/Bilinear_cn.rst | 0 .../{fluid => nn}/initializer/Constant_cn.rst | 0 .../{fluid => nn}/initializer/MSRA_cn.rst | 0 .../api/paddle/nn/initializer/Normal_cn.rst | 16 + .../initializer/TruncatedNormal_cn.rst | 0 .../api/paddle/nn/initializer/Uniform_cn.rst | 17 + .../{fluid => nn}/initializer/Xavier_cn.rst | 0 .../api/paddle/nn/layer/conv/Conv2D_cn.rst | 122 +- .../{fluid/layers => nn}/switch_case_cn.rst | 0 .../{fluid/layers => nn}/while_loop_cn.rst | 0 .../api/paddle/{framework => }/no_grad_cn.rst | 0 .../{framework => }/prepare_context_cn.rst | 0 .../api/paddle/{fluid/layers => }/rank_cn.rst | 0 .../{fluid/layers => }/reciprocal_cn.rst | 0 .../{fluid/layers => }/reduce_all_cn.rst | 0 .../{fluid/layers => }/reduce_any_cn.rst | 0 .../{fluid/layers => }/reduce_max_cn.rst | 0 .../{fluid/layers => }/reduce_mean_cn.rst | 0 .../{fluid/layers => }/reduce_min_cn.rst | 0 .../{fluid/layers => }/reduce_prod_cn.rst | 0 .../{fluid/layers => }/reduce_sum_cn.rst | 0 .../paddle/{fluid/layers => }/reshape_cn.rst | 0 .../paddle/{fluid/layers => }/round_cn.rst | 0 
.../paddle/{fluid/layers => }/rsqrt_cn.rst | 0 doc/paddle/api/paddle/save_cn.rst | 51 + .../paddle/{fluid/layers => }/scale_cn.rst | 0 .../paddle/{fluid/layers => }/scatter_cn.rst | 0 .../{fluid/layers => }/scatter_nd_add_cn.rst | 0 .../{fluid/layers => }/scatter_nd_cn.rst | 0 .../paddle/{fluid/layers => }/shape_cn.rst | 0 .../{fluid/layers => }/shard_index_cn.rst | 0 .../api/paddle/{fluid/io => }/shuffle_cn.rst | 0 .../api/paddle/{fluid/layers => }/sign_cn.rst | 0 .../api/paddle/{tensor/math => }/sin_cn.rst | 27 +- .../paddle/{fluid/layers => }/slice_cn.rst | 0 .../api/paddle/{tensor/math => }/sqrt_cn.rst | 29 +- .../paddle/{fluid/layers => }/square_cn.rst | 0 .../paddle/{fluid/layers => }/stanh_cn.rst | 0 .../api/paddle/static/global_scope_cn.rst | 4 +- doc/paddle/api/paddle/static/load_cn.rst | 55 + .../api/paddle/static/nn/batch_norm_cn.rst | 111 ++ .../static/nn/bilinear_tensor_product_cn.rst | 51 + doc/paddle/api/paddle/static/nn/conv2d_cn.rst | 114 ++ .../paddle/static/nn/conv2d_transpose_cn.rst | 120 ++ doc/paddle/api/paddle/static/nn/conv3d_cn.rst | 116 ++ .../paddle/static/nn/conv3d_transpose_cn.rst | 124 ++ .../paddle/static/nn/create_parameter_cn.rst | 44 + .../api/paddle/static/nn/crf_decoding_cn.rst | 60 + .../api/paddle/static/nn/data_norm_cn.rst | 65 + .../paddle/static/nn/deformable_conv_cn.rst | 102 ++ .../api/paddle/static/nn/embedding_cn.rst | 97 ++ doc/paddle/api/paddle/static/nn/fc_cn.rst | 114 ++ .../api/paddle/static/nn/group_norm_cn.rst | 51 + .../api/paddle/static/nn/hsigmoid_cn.rst | 60 + .../api/paddle/static/nn/instance_norm_cn.rst | 57 + .../api/paddle/static/nn/layer_norm_cn.rst | 59 + .../paddle/static/nn/multi_box_head_cn.rst | 112 ++ doc/paddle/api/paddle/static/nn/nce_cn.rst | 78 ++ doc/paddle/api/paddle/static/nn/prelu_cn.rst | 51 + .../api/paddle/static/nn/row_conv_cn.rst | 59 + .../api/paddle/static/nn/spectral_norm_cn.rst | 57 + doc/paddle/api/paddle/static/save_cn.rst | 51 + .../api/paddle/static/scope_guard_cn.rst | 4 +- 
.../{fluid/layers => }/strided_slice_cn.rst | 0 .../api/paddle/{fluid/layers => }/sums_cn.rst | 0 doc/paddle/api/paddle/tanh_cn.rst | 46 + .../api/paddle/tensor/creation/ones_cn.rst | 34 +- .../paddle/tensor/creation/ones_like_cn.rst | 45 +- .../api/paddle/tensor/creation/zeros_cn.rst | 30 +- .../paddle/tensor/creation/zeros_like_cn.rst | 41 +- .../paddle/tensor/manipulation/concat_cn.rst | 57 +- .../tensor/manipulation/expand_as_cn.rst | 68 + .../paddle/tensor/manipulation/expand_cn.rst | 71 ++ .../paddle/tensor/manipulation/gather_cn.rst | 55 +- .../paddle/tensor/manipulation/split_cn.rst | 51 +- .../paddle/tensor/manipulation/squeeze_cn.rst | 66 - doc/paddle/api/paddle/tensor/math/abs_cn.rst | 33 - doc/paddle/api/paddle/tensor/math/acos_cn.rst | 39 - doc/paddle/api/paddle/tensor/math/asin_cn.rst | 39 - doc/paddle/api/paddle/tensor/math/atan_cn.rst | 38 - doc/paddle/api/paddle/tensor/math/ceil_cn.rst | 42 - doc/paddle/api/paddle/tensor/math/cos_cn.rst | 44 - .../paddle/tensor/math/elementwise_add_cn.rst | 121 -- .../paddle/tensor/math/elementwise_div_cn.rst | 121 -- .../tensor/math/elementwise_floordiv_cn.rst | 116 -- .../paddle/tensor/math/elementwise_mod_cn.rst | 117 -- .../paddle/tensor/math/elementwise_pow_cn.rst | 84 -- .../paddle/tensor/math/elementwise_sub_cn.rst | 122 -- doc/paddle/api/paddle/tensor/math/erf_cn.rst | 75 -- doc/paddle/api/paddle/tensor/math/exp_cn.rst | 40 - .../api/paddle/tensor/math/floor_cn.rst | 35 - .../api/paddle/tensor/math/increment_cn.rst | 31 - doc/paddle/api/paddle/tensor/math/log_cn.rst | 49 - .../api/paddle/tensor/math/multiplex_cn.rst | 76 -- doc/paddle/api/paddle/tensor/math/pow_cn.rst | 58 +- .../api/paddle/tensor/math/reciprocal_cn.rst | 43 - .../api/paddle/tensor/math/reduce_max_cn.rst | 57 - .../api/paddle/tensor/math/reduce_min_cn.rst | 57 - .../api/paddle/tensor/math/reduce_prod_cn.rst | 58 - .../api/paddle/tensor/math/reduce_sum_cn.rst | 57 - .../api/paddle/tensor/math/round_cn.rst | 54 - 
.../api/paddle/tensor/math/rsqrt_cn.rst | 39 - .../api/paddle/tensor/math/scale_cn.rst | 77 -- doc/paddle/api/paddle/tensor/math/sign_cn.rst | 32 - .../api/paddle/tensor/math/square_cn.rst | 44 - .../api/paddle/tensor/math/stanh_cn.rst | 51 - doc/paddle/api/paddle/tensor/math/sum_cn.rst | 110 +- doc/paddle/api/paddle/tensor/math/sums_cn.rst | 60 - doc/paddle/api/paddle/tensor/math/tanh_cn.rst | 46 - .../api/paddle/tensor/search/argmax_cn.rst | 83 +- .../api/paddle/tensor/search/where_cn.rst | 61 +- .../paddle/{framework => }/to_variable_cn.rst | 0 .../api/paddle/{fluid/layers => }/topk_cn.rst | 0 .../{fluid/layers => }/transpose_cn.rst | 0 .../paddle/{fluid/layers => }/unique_cn.rst | 0 .../layers => }/unique_with_counts_cn.rst | 0 .../paddle/{fluid/layers => }/unstack_cn.rst | 0 345 files changed, 3236 insertions(+), 6462 deletions(-) rename doc/paddle/api/paddle/{framework => }/CosineDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/ExponentialDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/InverseTimeDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/NaturalExpDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/NoamDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/ParallelEnv_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/ParamAttr_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/PiecewiseDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/PolynomialDecay_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/Variable_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/abs_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/acos_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/argmin_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/asin_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/atan_cn.rst rename doc/paddle/api/paddle/{fluid/layers => }/cast_cn.rst (100%) rename 
doc/paddle/api/paddle/{fluid/layers => }/ceil_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/cos_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/create_global_var_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/create_parameter_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/create_tensor_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/crop_tensor_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/diag_cn.rst (100%) rename doc/paddle/api/paddle/{ => distributed}/fleet/DatasetFactory_cn.rst (100%) rename doc/paddle/api/paddle/{ => distributed}/fleet/InMemoryDataset_cn.rst (100%) rename doc/paddle/api/paddle/{ => distributed}/fleet/QueueDataset_cn.rst (100%) rename doc/paddle/api/paddle/{ => distributed}/fleet/meta_optimizers/LambOptimizer_cn.rst (100%) rename doc/paddle/api/paddle/{ => distributed}/fleet/meta_optimizers/RecomputeOptimizer_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_add_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_div_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_floordiv_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_mod_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_pow_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/elementwise_sub_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/erf_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/exp_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/expand_as_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/expand_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/fill_constant_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/floor_cn.rst (100%) delete mode 100644 doc/paddle/api/paddle/fluid/BuildStrategy_cn.rst create mode 100644 doc/paddle/api/paddle/fluid/CPUPlace_cn.rst create mode 100644 
doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst create mode 100644 doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/CompiledProgram_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/ExecutionStrategy_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/Executor_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/ParallelExecutor_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/Program_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/WeightNormParamAttr_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/default_main_program_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/default_startup_program_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/global_scope_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/gradients_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/io/load_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/atan_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/flatten_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/layers/mean_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/name_scope_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/program_guard_cn.rst delete mode 100644 
doc/paddle/api/paddle/fluid/save_cn.rst delete mode 100644 doc/paddle/api/paddle/fluid/scope_guard_cn.rst create mode 100644 doc/paddle/api/paddle/framework/get_default_dtype_cn.rst delete mode 100644 doc/paddle/api/paddle/framework/load_cn.rst delete mode 100644 doc/paddle/api/paddle/framework/save_cn.rst create mode 100644 doc/paddle/api/paddle/framework/set_default_dtype_cn.rst rename doc/paddle/api/paddle/{fluid/layers => }/gather_nd_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/grad_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/has_inf_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/has_nan_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/increment_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/is_empty_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/isfinite_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/linspace_cn.rst (55%) create mode 100644 doc/paddle/api/paddle/load_cn.rst rename doc/paddle/api/paddle/{fluid/layers => }/log_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/logical_and_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/logical_not_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/logical_or_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/logical_xor_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/multiplex_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/BatchNorm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/BilinearTensorProduct_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/Embedding_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/clip => nn}/GradientClipByGlobalNorm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/clip => nn}/GradientClipByNorm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/clip => nn}/GradientClipByValue_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/GroupNorm_cn.rst (100%) rename 
doc/paddle/api/paddle/{fluid/dygraph/container => nn}/LayerList_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/LayerNorm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph/layers => nn}/Layer_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/Linear_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph/container => nn}/ParameterList_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/Pool2D_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph/container => nn}/Sequential_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/dygraph => nn}/SpectralNorm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/beam_search_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/beam_search_decode_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/case_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/clip_by_norm_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/clip_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/cond_cn.rst (100%) rename doc/paddle/api/paddle/{fluid => nn}/data_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional/activation}/softmax_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/adaptive_pool2d_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/adaptive_pool3d_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/add_position_encoding_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/affine_channel_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/affine_grid_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/anchor_generator_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/assign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/bipartite_match_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => 
nn/functional}/box_clip_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/box_coder_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/box_decoder_and_assign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/bpr_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/brelu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/center_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/collect_fpn_proposals_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/continuous_value_model_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/cosine_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/cross_entropy_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/deformable_roi_pooling_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/density_prior_box_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/detection_output_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/dice_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/distribute_fpn_proposals_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/dropout_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/edit_distance_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/elu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/exponential_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/filter_by_instag_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/fsp_matrix_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/gelu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => 
nn/functional}/generate_mask_labels_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/generate_proposal_labels_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/generate_proposals_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/grid_sampler_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/hard_sigmoid_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/hard_swish_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/hash_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/huber_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/image_resize_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/image_resize_short_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/inverse_time_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/iou_similarity_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/kldiv_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/l2_normalize_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/label_smooth_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/leaky_relu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/linear_lr_warmup_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/log_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/logsigmoid_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional/loss}/mse_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/lrn_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/maxout_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/nn/functional/mse_loss_cn.rst rename 
doc/paddle/api/paddle/{fluid/layers => nn/functional}/multiclass_nms_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/natural_exp_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/noam_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/npair_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid => nn/functional}/one_hot_cn.rst (65%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pad2d_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pad_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pad_constant_like_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/piecewise_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pixel_shuffle_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/polygon_box_transform_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/polynomial_decay_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pool2d_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/pool3d_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/prior_box_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/prroi_pool_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/psroi_pool_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/random_crop_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/rank_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/relu6_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/resize_bilinear_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/resize_nearest_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/resize_trilinear_cn.rst (100%) rename 
doc/paddle/api/paddle/{fluid/layers => nn/functional}/retinanet_detection_output_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/retinanet_target_assign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/roi_align_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/roi_perspective_transform_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/roi_pool_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/rpn_target_assign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/sampled_softmax_with_cross_entropy_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/selu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/shuffle_channel_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/sigmoid_cross_entropy_with_logits_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/sigmoid_focal_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/similarity_focus_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/smooth_l1_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/soft_relu_cn.rst (100%) delete mode 100755 doc/paddle/api/paddle/nn/functional/softmax_cn.rst rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/softmax_with_cross_entropy_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/softplus_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/softshrink_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/softsign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/space_to_depth_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/square_error_cost_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/ssd_loss_cn.rst (100%) 
rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/swish_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/tanh_shrink_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/target_assign_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/teacher_student_sigmoid_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/temporal_shift_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/thresholded_relu_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/unfold_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/warpctc_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/yolo_box_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn/functional}/yolov3_loss_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/gather_tree_cn.rst (100%) rename doc/paddle/api/paddle/{fluid => nn}/initializer/Bilinear_cn.rst (100%) rename doc/paddle/api/paddle/{fluid => nn}/initializer/Constant_cn.rst (100%) rename doc/paddle/api/paddle/{fluid => nn}/initializer/MSRA_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/nn/initializer/Normal_cn.rst rename doc/paddle/api/paddle/{fluid => nn}/initializer/TruncatedNormal_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/nn/initializer/Uniform_cn.rst rename doc/paddle/api/paddle/{fluid => nn}/initializer/Xavier_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/switch_case_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => nn}/while_loop_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/no_grad_cn.rst (100%) rename doc/paddle/api/paddle/{framework => }/prepare_context_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/rank_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reciprocal_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_all_cn.rst (100%) rename 
doc/paddle/api/paddle/{fluid/layers => }/reduce_any_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_max_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_mean_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_min_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_prod_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reduce_sum_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/reshape_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/round_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/rsqrt_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/save_cn.rst rename doc/paddle/api/paddle/{fluid/layers => }/scale_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/scatter_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/scatter_nd_add_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/scatter_nd_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/shape_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/shard_index_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/io => }/shuffle_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/sign_cn.rst (100%) rename doc/paddle/api/paddle/{tensor/math => }/sin_cn.rst (66%) rename doc/paddle/api/paddle/{fluid/layers => }/slice_cn.rst (100%) rename doc/paddle/api/paddle/{tensor/math => }/sqrt_cn.rst (63%) rename doc/paddle/api/paddle/{fluid/layers => }/square_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/stanh_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/static/load_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/batch_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/bilinear_tensor_product_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/conv2d_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/conv2d_transpose_cn.rst create mode 100644 
doc/paddle/api/paddle/static/nn/conv3d_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/conv3d_transpose_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/create_parameter_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/crf_decoding_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/data_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/deformable_conv_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/embedding_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/fc_cn.rst create mode 100755 doc/paddle/api/paddle/static/nn/group_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/hsigmoid_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/instance_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/layer_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/multi_box_head_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/nce_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/prelu_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/row_conv_cn.rst create mode 100644 doc/paddle/api/paddle/static/nn/spectral_norm_cn.rst create mode 100644 doc/paddle/api/paddle/static/save_cn.rst rename doc/paddle/api/paddle/{fluid/layers => }/strided_slice_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/sums_cn.rst (100%) create mode 100644 doc/paddle/api/paddle/tanh_cn.rst create mode 100644 doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst create mode 100644 doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/manipulation/squeeze_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/abs_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/acos_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/asin_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/atan_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/ceil_cn.rst delete mode 100644 
doc/paddle/api/paddle/tensor/math/cos_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_add_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_div_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_floordiv_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_mod_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_pow_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/elementwise_sub_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/erf_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/exp_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/floor_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/increment_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/log_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/multiplex_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/reciprocal_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/reduce_max_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/reduce_min_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/reduce_prod_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/reduce_sum_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/round_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/rsqrt_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/scale_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/sign_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/square_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/stanh_cn.rst mode change 100644 => 100755 doc/paddle/api/paddle/tensor/math/sum_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/sums_cn.rst delete mode 100644 doc/paddle/api/paddle/tensor/math/tanh_cn.rst rename doc/paddle/api/paddle/{framework => }/to_variable_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/topk_cn.rst (100%) 
rename doc/paddle/api/paddle/{fluid/layers => }/transpose_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/unique_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/unique_with_counts_cn.rst (100%) rename doc/paddle/api/paddle/{fluid/layers => }/unstack_cn.rst (100%) diff --git a/doc/paddle/api/alias_api_mapping b/doc/paddle/api/alias_api_mapping index 9af28a536..a706d959d 100644 --- a/doc/paddle/api/alias_api_mapping +++ b/doc/paddle/api/alias_api_mapping @@ -1,727 +1,396 @@ -paddle.fluid.layers.swish paddle.nn.functional.activation.swish -paddle.fluid.layers.space_to_depth paddle.nn.functional.space_to_depth -paddle.fluid.layers.soft_relu paddle.nn.functional.soft_relu -paddle.fluid.layers.reciprocal paddle.tensor.math.reciprocal -paddle.fluid.layers.pixel_shuffle paddle.nn.functional.vision.pixel_shuffle -paddle.tensor.math.mm paddle.mm -paddle.fluid.layers.mean paddle.mean -paddle.tensor.math.inverse paddle.tensor.inverse -paddle.tensor.creation.full paddle.full -paddle.tensor.manipulation.flip paddle.tensor.reverse -paddle.fluid.layers.erf paddle.nn.functional.activation.erf -paddle.tensor.math.abs paddle.abs -paddle.fluid.layers.unique_with_counts paddle.unique_with_counts -paddle.fluid.layers.similarity_focus paddle.nn.functional.similarity_focus -paddle.fluid.layers.rpn_target_assign paddle.nn.functional.rpn_target_assign -paddle.tensor.math.increment paddle.tensor.increment -paddle.fluid.layers.hash paddle.nn.functional.lod.hash -paddle.tensor.math.elementwise_sub paddle.elementwise_sub -paddle.fluid.layers.create_tensor paddle.tensor.create_tensor -paddle.nn.functional.conv.conv2d paddle.nn.functional.conv2d -paddle.tensor.linalg.cholesky paddle.cholesky -paddle.fluid.layers.case paddle.nn.control_flow.case -paddle.fluid.layers.bipartite_match paddle.nn.functional.vision.bipartite_match -paddle.framework.ParamAttr paddle.ParamAttr -paddle.nn.layer.activation.HSigmoid paddle.nn.HSigmoid -paddle.tensor.math.square paddle.square 
-paddle.fluid.layers.roi_align paddle.nn.functional.roi_align -paddle.tensor.math.logsumexp paddle.logsumexp -paddle.fluid.layers.expand paddle.expand -paddle.tensor.math.exp paddle.tensor.exp -paddle.fluid.layers.elu paddle.nn.functional.elu -paddle.fluid.layers.cast paddle.cast -paddle.fluid.layers.beam_search paddle.nn.decode.beam_search -paddle.fluid.layers.affine_grid paddle.nn.functional.affine_grid -paddle.nn.layer.activation.Sigmoid paddle.nn.Sigmoid -paddle.nn.layer.loss.NLLLoss paddle.nn.layer.NLLLoss -paddle.nn.layer.loss.MSELoss paddle.nn.layer.MSELoss -paddle.nn.layer.loss.BCELoss paddle.nn.layer.BCELoss -paddle.tensor.manipulation.unsqueeze paddle.unsqueeze -paddle.fluid.layers.unique paddle.unique -paddle.fluid.layers.teacher_student_sigmoid_loss paddle.nn.functional.loss.teacher_student_sigmoid_loss -paddle.fluid.layers.sums paddle.tensor.math.sums -paddle.tensor.math.stanh paddle.stanh -paddle.fluid.layers.inverse_time_decay paddle.nn.functional.inverse_time_decay -paddle.fluid.layers.expand_as paddle.expand_as -paddle.framework.default_startup_program paddle.default_startup_program -paddle.fluid.layers.crop_tensor paddle.tensor.crop_tensor -paddle.nn.layer.loss.L1Loss paddle.nn.layer.L1Loss -paddle.fluid.clip.GradientClipByValue paddle.nn.clip.GradientClipByValue -paddle.tensor.math.tanh paddle.tanh -paddle.fluid.layers.square_error_cost paddle.nn.functional.loss.square_error_cost -paddle.framework.py_func paddle.py_func -paddle.fluid.layers.pad paddle.nn.functional.common.pad -paddle.fluid.layers.natural_exp_decay paddle.nn.functional.natural_exp_decay -paddle.tensor.math.kron paddle.tensor.kron -paddle.fluid.layers.is_empty paddle.tensor.is_empty -paddle.fluid.layers.exponential_decay paddle.nn.functional.exponential_decay -paddle.fluid.layers.elementwise_floordiv paddle.tensor.math.elementwise_floordiv -paddle.nn.layer.conv paddle.nn.conv -paddle.fluid.layers.brelu paddle.nn.functional.brelu -paddle.fluid.layers.box_clip 
paddle.nn.functional.vision.box_clip -paddle.fluid.layers.add_position_encoding paddle.nn.functional.extension.add_position_encoding -paddle.fluid.layers.target_assign paddle.nn.functional.target_assign -paddle.fluid.layers.strided_slice paddle.tensor.manipulation.strided_slice -paddle.tensor.manipulation.squeeze paddle.squeeze -paddle.nn.functional.activation.relu paddle.nn.functional.relu -paddle.fluid.layers.polynomial_decay paddle.nn.functional.polynomial_decay -paddle.framework.random.manual_seed paddle.manual_seed -paddle.fluid.io.load paddle.tensor.io.load -paddle.fluid.layers.label_smooth paddle.nn.functional.label_smooth -paddle.fluid.layers.hash paddle.nn.functional.hash -paddle.fluid.layers.assign paddle.nn.functional.common.assign -paddle.fluid.layers.acos paddle.tensor.math.acos -paddle.nn.layer.conv.Conv3D paddle.nn.layer.Conv3D -paddle.fluid.dygraph.BilinearTensorProduct paddle.nn.layer.BilinearTensorProduct -paddle.fluid.layers.selu paddle.nn.functional.selu -paddle.tensor.random.rand paddle.rand -paddle.fluid.layers.piecewise_decay paddle.nn.functional.piecewise_decay -paddle.fluid.layers.iou_similarity paddle.nn.functional.loss.iou_similarity -paddle.fluid.layers.increment paddle.tensor.math.increment -paddle.fluid.layers.hard_swish paddle.nn.functional.hard_swish -paddle.fluid.layers.elementwise_add paddle.tensor.math.elementwise_add -paddle.framework.default_main_program paddle.default_main_program -paddle.tensor.math.cumsum paddle.tensor.cumsum -paddle.fluid.layers.cos paddle.tensor.math.cos -paddle.tensor.math.add paddle.tensor.add -paddle.fluid.layers.unfold paddle.nn.functional.common.unfold -paddle.fluid.layers.switch_case paddle.nn.switch_case -paddle.tensor.math.round paddle.tensor.round -paddle.fluid.layers.pixel_shuffle paddle.nn.functional.pixel_shuffle -paddle.tensor.math.inverse paddle.inverse -paddle.tensor.manipulation.flip paddle.reverse -paddle.fluid.layers.clip_by_norm paddle.nn.clip.clip_by_norm -paddle.tensor.math.asin 
paddle.tensor.asin -paddle.fluid.layers.teacher_student_sigmoid_loss paddle.nn.functional.teacher_student_sigmoid_loss -paddle.tensor.random.randperm paddle.tensor.randperm -paddle.tensor.random.randn paddle.tensor.randn -paddle.tensor.math.multiply paddle.tensor.multiply -paddle.fluid.layers.multiclass_nms paddle.nn.functional.multiclass_nms -paddle.fluid.io.load paddle.tensor.load -paddle.tensor.math.increment paddle.increment -paddle.fluid.layers.create_tensor paddle.create_tensor -paddle.fluid.layers.bipartite_match paddle.nn.functional.bipartite_match -paddle.nn.layer.loss.NLLLoss paddle.nn.NLLLoss -paddle.nn.layer.loss.MSELoss paddle.nn.MSELoss -paddle.fluid.clip.GradientClipByValue paddle.nn.GradientClipByValue -paddle.nn.layer.loss.BCELoss paddle.nn.BCELoss -paddle.fluid.layers.square_error_cost paddle.nn.functional.square_error_cost -paddle.fluid.layers.softplus paddle.nn.functional.softplus -paddle.fluid.layers.logical_not paddle.tensor.logic.logical_not -paddle.tensor.math.exp paddle.exp -paddle.fluid.layers.elementwise_sub paddle.tensor.math.elementwise_sub -paddle.tensor.math.elementwise_div paddle.tensor.elementwise_div -paddle.fluid.layers.elementwise_div paddle.tensor.math.elementwise_div -paddle.fluid.layers.cosine_decay paddle.nn.functional.cosine_decay -paddle.fluid.layers.cond paddle.nn.control_flow.cond -paddle.fluid.layers.beam_search paddle.nn.beam_search -paddle.nn.layer.loss.L1Loss paddle.nn.L1Loss -paddle.fluid.layers.temporal_shift paddle.nn.functional.extension.temporal_shift -paddle.tensor.math.sum paddle.tensor.sum -paddle.fluid.layers.softsign paddle.nn.functional.softsign -paddle.tensor.linalg.histogram paddle.tensor.histogram -paddle.fluid.layers.diag paddle.tensor.creation.diag -paddle.fluid.layers.crop_tensor paddle.crop_tensor -paddle.fluid.layers.beam_search_decode paddle.nn.decode.beam_search_decode -paddle.fluid.layers.adaptive_pool3d paddle.nn.functional.pooling.adaptive_pool3d -paddle.tensor.search.where paddle.tensor.where 
-paddle.fluid.layers.swish paddle.nn.functional.swish -paddle.fluid.layers.smooth_l1 paddle.nn.functional.loss.smooth_l1 -paddle.fluid.layers.reduce_any paddle.tensor.logic.reduce_any -paddle.fluid.layers.pad paddle.nn.functional.pad -paddle.fluid.layers.noam_decay paddle.nn.functional.noam_decay -paddle.tensor.math.kron paddle.kron -paddle.fluid.layers.is_empty paddle.is_empty -paddle.fluid.layers.iou_similarity paddle.nn.functional.iou_similarity -paddle.tensor.manipulation.gather paddle.tensor.gather -paddle.fluid.layers.erf paddle.nn.functional.erf -paddle.fluid.layers.box_clip paddle.nn.functional.box_clip -paddle.tensor.search.argsort paddle.tensor.argsort -paddle.nn.layer.conv.Conv3D paddle.nn.Conv3D -paddle.framework.CompiledProgram paddle.CompiledProgram -paddle.fluid.dygraph.BilinearTensorProduct paddle.nn.BilinearTensorProduct -paddle.tensor.creation.triu paddle.tensor.triu -paddle.tensor.math.reduce_prod paddle.tensor.reduce_prod -paddle.tensor.creation.meshgrid paddle.tensor.meshgrid -paddle.fluid.layers.clip_by_norm paddle.nn.clip_by_norm -paddle.fluid.layers.assign paddle.nn.functional.assign -paddle.tensor.math.acos paddle.tensor.acos -paddle.fluid.initializer.Xavier paddle.nn.initializer.Xavier -paddle.tensor.creation.zeros_like paddle.tensor.zeros_like -paddle.fluid.layers.slice paddle.tensor.manipulation.slice -paddle.fluid.layers.sigmoid_focal_loss paddle.nn.functional.loss.sigmoid_focal_loss -paddle.fluid.io.shuffle paddle.tensor.random.shuffle -paddle.fluid.layers.linear_lr_warmup paddle.nn.functional.linear_lr_warmup -paddle.tensor.logic.greater_than paddle.tensor.greater_than -paddle.tensor.math.cumsum paddle.cumsum -paddle.framework.create_parameter paddle.create_parameter -paddle.tensor.math.clamp paddle.tensor.clamp -paddle.tensor.math.add paddle.add -paddle.fluid.layers.unfold paddle.nn.functional.unfold -paddle.tensor.search.sort paddle.tensor.sort -paddle.fluid.layers.rsqrt paddle.tensor.math.rsqrt -paddle.tensor.math.round 
paddle.round -paddle.fluid.layers.resize_nearest paddle.nn.functional.vision.resize_nearest -paddle.tensor.creation.ones_like paddle.tensor.ones_like -paddle.framework.global_scope paddle.global_scope -paddle.tensor.creation.full_like paddle.tensor.full_like -paddle.tensor.math.elementwise_max paddle.tensor.elementwise_max -paddle.fluid.layers.deformable_roi_pooling paddle.nn.functional.vision.deformable_roi_pooling -paddle.fluid.layers.ceil paddle.tensor.math.ceil -paddle.fluid.layers.case paddle.nn.case -paddle.tensor.math.asin paddle.asin -paddle.fluid.layers.add_position_encoding paddle.nn.functional.add_position_encoding -paddle.fluid.layers.retinanet_detection_output paddle.nn.functional.vision.retinanet_detection_output -paddle.tensor.random.randperm paddle.randperm -paddle.tensor.random.randn paddle.randn -paddle.tensor.math.multiply paddle.multiply -paddle.fluid.layers.margin_rank_loss paddle.nn.functional.loss.margin_rank_loss -paddle.fluid.layers.logical_not paddle.tensor.logical_not -paddle.fluid.io.load paddle.load -paddle.fluid.layers.grid_sampler paddle.nn.functional.vision.grid_sampler -paddle.fluid.layers.elementwise_mul paddle.tensor.math.elementwise_mul -paddle.fluid.layers.elementwise_max paddle.tensor.math.elementwise_max -paddle.fluid.layers.continuous_value_model paddle.nn.functional.extension.continuous_value_model -paddle.fluid.layers.smooth_l1 paddle.nn.functional.smooth_l1 -paddle.tensor.linalg.norm paddle.tensor.norm -paddle.framework.name_scope paddle.name_scope -paddle.tensor.logic.less_than paddle.tensor.less_than -paddle.fluid.layers.l2_normalize paddle.nn.functional.norm.l2_normalize -paddle.tensor.math.elementwise_min paddle.tensor.elementwise_min -paddle.tensor.math.elementwise_div paddle.elementwise_div -paddle.tensor.math.sum paddle.sum -paddle.fluid.layers.shard_index paddle.tensor.manipulation.shard_index -paddle.fluid.layers.scatter paddle.tensor.manipulation.scatter -paddle.fluid.layers.reduce_any paddle.tensor.reduce_any 
-paddle.tensor.search.nonzero paddle.tensor.nonzero -paddle.fluid.layers.logical_xor paddle.tensor.logic.logical_xor -paddle.tensor.linalg.histogram paddle.histogram -paddle.fluid.layers.elementwise_mod paddle.tensor.math.elementwise_mod -paddle.fluid.layers.elementwise_min paddle.tensor.math.elementwise_min -paddle.fluid.layers.distribute_fpn_proposals paddle.nn.functional.vision.distribute_fpn_proposals -paddle.fluid.layers.beam_search_decode paddle.nn.beam_search_decode -paddle.fluid.layers.yolov3_loss paddle.nn.functional.vision.yolov3_loss -paddle.tensor.search.where paddle.where -paddle.fluid.layers.strided_slice paddle.tensor.strided_slice -paddle.fluid.layers.sigmoid_focal_loss paddle.nn.functional.sigmoid_focal_loss -paddle.tensor.manipulation.gather paddle.gather -paddle.fluid.layers.filter_by_instag paddle.nn.functional.extension.filter_by_instag -paddle.tensor.math.erf paddle.tensor.erf -paddle.tensor.search.argsort paddle.argsort -paddle.tensor.math.addcmul paddle.tensor.addcmul -paddle.fluid.layers.adaptive_pool3d paddle.nn.functional.adaptive_pool3d -paddle.tensor.creation.triu paddle.triu -paddle.fluid.layers.topk paddle.tensor.search.topk -paddle.tensor.math.reduce_prod paddle.reduce_prod -paddle.tensor.math.reduce_max paddle.tensor.reduce_max -paddle.fluid.layers.pool3d paddle.nn.functional.pooling.pool3d -paddle.tensor.creation.meshgrid paddle.meshgrid -paddle.fluid.layers.diag paddle.tensor.diag -paddle.tensor.math.acos paddle.acos -paddle.framework.Program paddle.Program -paddle.framework.ParallelExecutor paddle.ParallelExecutor -paddle.tensor.creation.zeros_like paddle.zeros_like -paddle.fluid.layers.temporal_shift paddle.nn.functional.temporal_shift -paddle.fluid.io.shuffle paddle.tensor.shuffle -paddle.fluid.layers.reduce_max paddle.tensor.math.reduce_max -paddle.fluid.layers.margin_rank_loss paddle.nn.functional.margin_rank_loss -paddle.tensor.logic.greater_than paddle.greater_than -paddle.tensor.math.elementwise_mod 
paddle.tensor.elementwise_mod -paddle.fluid.layers.collect_fpn_proposals paddle.nn.functional.vision.collect_fpn_proposals -paddle.tensor.math.clamp paddle.clamp -paddle.fluid.layers.center_loss paddle.nn.functional.loss.center_loss -paddle.fluid.layers.atan paddle.tensor.math.atan -paddle.fluid.layers.unstack paddle.tensor.manipulation.unstack -paddle.tensor.creation.tril paddle.tensor.tril -paddle.tensor.search.sort paddle.sort -paddle.tensor.math.sin paddle.tensor.sin -paddle.tensor.math.sign paddle.tensor.sign -paddle.fluid.layers.resize_nearest paddle.nn.functional.resize_nearest -paddle.fluid.layers.reduce_prod paddle.tensor.math.reduce_prod -paddle.tensor.math.reduce_min paddle.tensor.reduce_min -paddle.tensor.creation.ones_like paddle.ones_like -paddle.fluid.layers.npair_loss paddle.nn.functional.loss.npair_loss -paddle.fluid.layers.l2_normalize paddle.nn.functional.l2_normalize -paddle.fluid.layers.kldiv_loss paddle.nn.functional.loss.kldiv_loss -paddle.fluid.layers.image_resize paddle.nn.functional.vision.image_resize -paddle.fluid.layers.huber_loss paddle.nn.functional.loss.huber_loss -paddle.tensor.creation.full_like paddle.full_like -paddle.fluid.layers.flatten paddle.tensor.manipulation.flatten -paddle.tensor.math.elementwise_max paddle.elementwise_max -paddle.fluid.layers.deformable_roi_pooling paddle.nn.functional.deformable_roi_pooling -paddle.fluid.layers.clip paddle.nn.clip.clip -paddle.fluid.layers.retinanet_detection_output paddle.nn.functional.retinanet_detection_output -paddle.fluid.layers.reduce_min paddle.tensor.math.reduce_min -paddle.fluid.layers.rank_loss paddle.nn.functional.loss.rank_loss -paddle.tensor.random.randint paddle.tensor.randint -paddle.fluid.layers.logical_not paddle.logical_not -paddle.fluid.layers.grid_sampler paddle.nn.functional.grid_sampler -paddle.fluid.layers.gather_tree paddle.nn.decode.gather_tree -paddle.tensor.math.elementwise_pow paddle.tensor.elementwise_pow -paddle.fluid.layers.elementwise_pow 
paddle.tensor.math.elementwise_pow -paddle.fluid.layers.dice_loss paddle.nn.functional.loss.dice_loss -paddle.fluid.layers.cond paddle.nn.cond -paddle.tensor.math.atan paddle.tensor.atan -paddle.fluid.layers.tanh_shrink paddle.nn.functional.activation.tanh_shrink -paddle.fluid.layers.ssd_loss paddle.nn.functional.loss.ssd_loss -paddle.tensor.linalg.norm paddle.norm -paddle.fluid.layers.mse_loss paddle.nn.functional.loss.mse_loss -paddle.fluid.layers.logical_xor paddle.tensor.logical_xor -paddle.fluid.layers.log_loss paddle.nn.functional.loss.log_loss -paddle.tensor.logic.less_than paddle.less_than -paddle.fluid.layers.hard_shrink paddle.nn.functional.activation.hard_shrink -paddle.fluid.layers.generate_proposals paddle.nn.functional.vision.generate_proposals -paddle.tensor.math.elementwise_min paddle.elementwise_min -paddle.tensor.math.elementwise_floordiv paddle.tensor.elementwise_floordiv -paddle.fluid.layers.dropout paddle.nn.functional.common.dropout -paddle.fluid.layers.bpr_loss paddle.nn.functional.loss.bpr_loss -paddle.fluid.layers.softshrink paddle.nn.functional.activation.softshrink -paddle.fluid.layers.reduce_any paddle.reduce_any -paddle.tensor.search.nonzero paddle.nonzero -paddle.fluid.layers.has_inf paddle.tensor.search.has_inf -paddle.fluid.layers.distribute_fpn_proposals paddle.nn.functional.distribute_fpn_proposals -paddle.framework.create_global_var paddle.create_global_var -paddle.fluid.dygraph.Embedding paddle.nn.layer.common.Embedding -paddle.fluid.layers.yolov3_loss paddle.nn.functional.yolov3_loss -paddle.fluid.layers.strided_slice paddle.strided_slice -paddle.fluid.layers.softmax paddle.nn.functional.activation.softmax -paddle.fluid.layers.multiplex paddle.tensor.math.multiplex -paddle.tensor.manipulation.flip paddle.tensor.flip -paddle.tensor.math.erf paddle.erf -paddle.fluid.layers.detection_output paddle.nn.functional.vision.detection_output -paddle.fluid.layers.continuous_value_model paddle.nn.functional.continuous_value_model 
-paddle.fluid.layers.center_loss paddle.nn.functional.center_loss -paddle.fluid.layers.box_coder paddle.nn.functional.vision.box_coder -paddle.tensor.math.addcmul paddle.addcmul -paddle.fluid.layers.topk paddle.tensor.topk -paddle.fluid.layers.slice paddle.tensor.slice -paddle.fluid.layers.sign paddle.tensor.math.sign -paddle.fluid.layers.round paddle.tensor.math.round -paddle.tensor.math.reduce_max paddle.reduce_max -paddle.fluid.layers.reduce_all paddle.tensor.logic.reduce_all -paddle.fluid.layers.npair_loss paddle.nn.functional.npair_loss -paddle.tensor.math.multiplex paddle.tensor.multiplex -paddle.fluid.layers.linspace paddle.tensor.creation.linspace -paddle.fluid.layers.kldiv_loss paddle.nn.functional.kldiv_loss -paddle.fluid.layers.huber_loss paddle.nn.functional.huber_loss -paddle.fluid.layers.diag paddle.diag -paddle.tensor.math.cos paddle.tensor.cos -paddle.fluid.layers.clip paddle.nn.clip -paddle.fluid.layers.argmin paddle.tensor.search.argmin -paddle.fluid.dygraph.Pool2D paddle.nn.layer.common.Pool2D -paddle.tensor.linalg.t paddle.t -paddle.fluid.io.shuffle paddle.shuffle -paddle.tensor.math.rsqrt paddle.tensor.rsqrt -paddle.tensor.manipulation.roll paddle.tensor.roll -paddle.fluid.layers.reduce_sum paddle.tensor.math.reduce_sum -paddle.fluid.layers.rank_loss paddle.nn.functional.rank_loss -paddle.fluid.layers.pool3d paddle.nn.functional.pool3d -paddle.fluid.one_hot paddle.nn.functional.common.one_hot -paddle.tensor.math.elementwise_mod paddle.elementwise_mod -paddle.fluid.layers.dice_loss paddle.nn.functional.dice_loss -paddle.fluid.layers.collect_fpn_proposals paddle.nn.functional.collect_fpn_proposals -paddle.tensor.linalg.bmm paddle.tensor.bmm -paddle.nn.layer.extension.RowConv paddle.nn.layer.RowConv -paddle.fluid.clip.GradientClipByGlobalNorm paddle.nn.clip.GradientClipByGlobalNorm -paddle.framework.Executor paddle.Executor -paddle.tensor.creation.tril paddle.tril -paddle.fluid.layers.ssd_loss paddle.nn.functional.ssd_loss -paddle.tensor.math.sqrt 
paddle.tensor.sqrt -paddle.tensor.math.sin paddle.sin -paddle.tensor.math.sign paddle.sign -paddle.fluid.layers.resize_trilinear paddle.nn.functional.vision.resize_trilinear -paddle.tensor.math.reduce_min paddle.reduce_min -paddle.fluid.layers.mse_loss paddle.nn.functional.mse_loss -paddle.fluid.layers.maxout paddle.nn.functional.activation.maxout -paddle.fluid.layers.log_loss paddle.nn.functional.log_loss -paddle.tensor.search.index_sample paddle.tensor.index_sample -paddle.fluid.layers.image_resize paddle.nn.functional.image_resize -paddle.fluid.layers.generate_proposal_labels paddle.nn.functional.vision.generate_proposal_labels -paddle.fluid.layers.filter_by_instag paddle.nn.functional.filter_by_instag -paddle.fluid.layers.bpr_loss paddle.nn.functional.bpr_loss -paddle.fluid.layers.asin paddle.tensor.math.asin -paddle.fluid.dygraph.container.Sequential paddle.nn.Sequential -paddle.nn.layer.common.Pad2D paddle.nn.layer.Pad2D -paddle.fluid.layers.resize_bilinear paddle.nn.functional.vision.resize_bilinear -paddle.tensor.random.randint paddle.randint -paddle.fluid.layers.gather_tree paddle.nn.gather_tree -paddle.tensor.math.elementwise_pow paddle.elementwise_pow -paddle.nn.functional.extension.diag_embed paddle.nn.functional.diag_embed -paddle.tensor.math.atan paddle.atan -paddle.tensor.math.addmm paddle.tensor.addmm -paddle.tensor.math.sums paddle.tensor.sums -paddle.tensor.manipulation.split paddle.tensor.split -paddle.fluid.layers.shuffle_channel paddle.nn.functional.vision.shuffle_channel -paddle.fluid.layers.shard_index paddle.tensor.shard_index -paddle.fluid.layers.scatter_nd paddle.tensor.manipulation.scatter_nd -paddle.fluid.layers.scatter paddle.tensor.scatter -paddle.fluid.layers.logical_xor paddle.logical_xor -paddle.fluid.framework.in_dygraph_mode paddle.in_imperative_mode -paddle.fluid.layers.generate_proposals paddle.nn.functional.generate_proposals -paddle.tensor.math.elementwise_floordiv paddle.elementwise_floordiv -paddle.fluid.layers.edit_distance 
paddle.nn.functional.loss.edit_distance -paddle.fluid.layers.dropout paddle.nn.functional.dropout -paddle.nn.layer.common.UpSample paddle.nn.layer.UpSample -paddle.fluid.layers.while_loop paddle.nn.control_flow.while_loop -paddle.fluid.layers.scatter_nd_add paddle.tensor.manipulation.scatter_nd_add -paddle.fluid.layers.scale paddle.tensor.math.scale -paddle.fluid.layers.logical_and paddle.tensor.logic.logical_and -paddle.fluid.layers.has_inf paddle.tensor.has_inf -paddle.fluid.layers.gather_nd paddle.tensor.manipulation.gather_nd -paddle.fluid.layers.fill_constant paddle.tensor.creation.fill_constant -paddle.fluid.layers.cumsum paddle.tensor.math.cumsum -paddle.tensor.search.argmax paddle.tensor.argmax -paddle.fluid.layers.affine_channel paddle.nn.functional.vision.affine_channel -paddle.fluid.dygraph.Embedding paddle.nn.layer.Embedding -paddle.framework.CUDAPinnedPlace paddle.CUDAPinnedPlace -paddle.fluid.layers.sampled_softmax_with_cross_entropy paddle.nn.functional.loss.sampled_softmax_with_cross_entropy -paddle.fluid.layers.reduce_all paddle.tensor.reduce_all -paddle.nn.functional.activation.log_softmax paddle.nn.functional.log_softmax -paddle.fluid.layers.has_nan paddle.tensor.search.has_nan -paddle.fluid.layers.generate_mask_labels paddle.nn.functional.vision.generate_mask_labels -paddle.tensor.manipulation.flip paddle.flip -paddle.tensor.linalg.dot paddle.tensor.dot -paddle.fluid.layers.detection_output paddle.nn.functional.detection_output -paddle.nn.functional.conv.conv3d paddle.nn.functional.conv3d -paddle.fluid.layers.box_coder paddle.nn.functional.box_coder -paddle.fluid.clip.GradientClipByGlobalNorm paddle.nn.GradientClipByGlobalNorm -paddle.fluid.layers.topk paddle.topk -paddle.fluid.layers.slice paddle.slice -paddle.tensor.math.multiplex paddle.multiplex -paddle.tensor.logic.equal_all paddle.tensor.equal_all -paddle.tensor.math.cos paddle.cos -paddle.fluid.layers.argmin paddle.tensor.argmin -paddle.fluid.dygraph.SpectralNorm 
paddle.nn.layer.norm.SpectralNorm -paddle.nn.layer.extension.RowConv paddle.nn.RowConv -paddle.fluid.dygraph.Pool2D paddle.nn.layer.Pool2D -paddle.fluid.clip.GradientClipByNorm paddle.nn.clip.GradientClipByNorm -paddle.fluid.layers.unstack paddle.tensor.unstack -paddle.fluid.layers.tanh_shrink paddle.nn.functional.tanh_shrink -paddle.tensor.math.rsqrt paddle.rsqrt -paddle.tensor.manipulation.roll paddle.roll -paddle.fluid.layers.reshape paddle.tensor.manipulation.reshape -paddle.fluid.layers.random_crop paddle.nn.functional.extension.random_crop -paddle.fluid.layers.psroi_pool paddle.nn.functional.vision.psroi_pool -paddle.fluid.layers.prroi_pool paddle.nn.functional.vision.prroi_pool -paddle.fluid.one_hot paddle.nn.functional.one_hot -paddle.fluid.layers.isfinite paddle.tensor.logic.isfinite -paddle.fluid.layers.hard_shrink paddle.nn.functional.hard_shrink -paddle.fluid.layers.flatten paddle.tensor.flatten -paddle.tensor.linalg.bmm paddle.bmm -paddle.nn.layer.common.Pad2D paddle.nn.Pad2D -paddle.tensor.math.sqrt paddle.sqrt -paddle.fluid.layers.softshrink paddle.nn.functional.softshrink -paddle.fluid.layers.resize_trilinear paddle.nn.functional.resize_trilinear -paddle.fluid.layers.linspace paddle.tensor.linspace -paddle.tensor.search.index_sample paddle.index_sample -paddle.fluid.layers.generate_proposal_labels paddle.nn.functional.generate_proposal_labels -paddle.fluid.layers.edit_distance paddle.nn.functional.edit_distance -paddle.fluid.layers.density_prior_box paddle.nn.functional.vision.density_prior_box -paddle.tensor.manipulation.stack paddle.tensor.stack -paddle.fluid.layers.square paddle.tensor.math.square -paddle.fluid.layers.softmax paddle.nn.functional.softmax -paddle.fluid.layers.roi_pool paddle.nn.functional.vision.roi_pool -paddle.fluid.layers.resize_bilinear paddle.nn.functional.resize_bilinear -paddle.tensor.linalg.matmul paddle.tensor.matmul -paddle.nn.functional.common.interpolate paddle.nn.functional.interpolate -paddle.tensor.math.div 
paddle.tensor.div -paddle.tensor.manipulation.concat paddle.tensor.concat -paddle.tensor.math.addmm paddle.addmm -paddle.nn.layer.common.UpSample paddle.nn.UpSample -paddle.fluid.dygraph.LayerNorm paddle.nn.layer.norm.LayerNorm -paddle.fluid.dygraph.GroupNorm paddle.nn.layer.norm.GroupNorm -paddle.fluid.dygraph.BatchNorm paddle.nn.layer.norm.BatchNorm -paddle.tensor.manipulation.unbind paddle.tensor.unbind -paddle.tensor.math.sums paddle.sums -paddle.tensor.manipulation.split paddle.split -paddle.fluid.layers.shuffle_channel paddle.nn.functional.shuffle_channel -paddle.fluid.layers.shard_index paddle.shard_index -paddle.fluid.layers.scatter paddle.scatter -paddle.fluid.layers.sampled_softmax_with_cross_entropy paddle.nn.functional.sampled_softmax_with_cross_entropy -paddle.fluid.layers.roi_perspective_transform paddle.nn.functional.vision.roi_perspective_transform -paddle.fluid.layers.logical_and paddle.tensor.logical_and -paddle.tensor.math.log paddle.tensor.log -paddle.fluid.initializer.TruncatedNormal paddle.nn.initializer.TruncatedNormal -paddle.nn.layer.activation.LeakyReLU paddle.nn.layer.LeakyReLU -paddle.fluid.dygraph.Embedding paddle.nn.Embedding -paddle.fluid.initializer.Constant paddle.nn.initializer.Constant -paddle.framework.CUDAPlace paddle.CUDAPlace -paddle.tensor.linalg.t paddle.tensor.t -paddle.fluid.layers.polygon_box_transform paddle.nn.functional.extension.polygon_box_transform -paddle.fluid.layers.has_inf paddle.has_inf -paddle.tensor.logic.greater_equal paddle.tensor.greater_equal -paddle.nn.functional.conv.conv3d_transpose paddle.nn.functional.conv3d_transpose -paddle.nn.functional.conv.conv2d_transpose paddle.nn.functional.conv2d_transpose -paddle.tensor.search.argmax paddle.argmax -paddle.fluid.layers.affine_channel paddle.nn.functional.affine_channel -paddle.fluid.dygraph.SpectralNorm paddle.nn.layer.SpectralNorm -paddle.nn.layer.activation.LogSoftmax paddle.nn.layer.LogSoftmax -paddle.nn.layer.norm.InstanceNorm 
paddle.nn.layer.InstanceNorm -paddle.fluid.clip.GradientClipByNorm paddle.nn.GradientClipByNorm -paddle.nn.layer.conv.Conv2D paddle.nn.layer.Conv2D -paddle.framework.CPUPlace paddle.CPUPlace -paddle.fluid.layers.transpose paddle.tensor.manipulation.transpose -paddle.fluid.layers.shape paddle.tensor.attribute.shape -paddle.fluid.layers.reduce_all paddle.reduce_all -paddle.fluid.layers.maxout paddle.nn.functional.maxout -paddle.fluid.layers.image_resize_short paddle.nn.functional.vision.image_resize_short -paddle.fluid.layers.has_nan paddle.tensor.has_nan -paddle.fluid.layers.hard_sigmoid paddle.nn.functional.activation.hard_sigmoid -paddle.fluid.layers.generate_mask_labels paddle.nn.functional.generate_mask_labels -paddle.tensor.linalg.dot paddle.dot -paddle.fluid.dygraph.Pool2D paddle.nn.Pool2D -paddle.tensor.math.trace paddle.tensor.trace -paddle.fluid.layers.softmax_with_cross_entropy paddle.nn.functional.loss.softmax_with_cross_entropy -paddle.fluid.layers.isfinite paddle.tensor.isfinite -paddle.tensor.search.index_select paddle.tensor.index_select -paddle.fluid.layers.fill_constant paddle.tensor.fill_constant -paddle.tensor.logic.equal_all paddle.equal_all -paddle.fluid.layers.argmin paddle.argmin -paddle.fluid.layers.unstack paddle.unstack -paddle.tensor.math.reciprocal paddle.tensor.reciprocal -paddle.fluid.layers.psroi_pool paddle.nn.functional.psroi_pool -paddle.fluid.layers.prroi_pool paddle.nn.functional.prroi_pool -paddle.tensor.math.max paddle.tensor.max -paddle.fluid.layers.logsigmoid paddle.nn.functional.activation.logsigmoid -paddle.tensor.logic.less_equal paddle.tensor.less_equal -paddle.fluid.layers.flatten paddle.flatten -paddle.fluid.dygraph.base.disable_dygraph paddle.disable_imperative -paddle.tensor.math.cosh paddle.tensor.cosh -paddle.tensor.math.ceil paddle.tensor.ceil -paddle.fluid.dygraph.LayerNorm paddle.nn.layer.LayerNorm -paddle.fluid.dygraph.GroupNorm paddle.nn.layer.GroupNorm -paddle.framework.ExecutionStrategy 
paddle.ExecutionStrategy -paddle.fluid.dygraph.BatchNorm paddle.nn.layer.BatchNorm -paddle.tensor.logic.not_equal paddle.tensor.not_equal -paddle.fluid.layers.linspace paddle.linspace -paddle.fluid.dygraph.base.enable_dygraph paddle.enable_imperative -paddle.fluid.layers.density_prior_box paddle.nn.functional.density_prior_box -paddle.fluid.layers.anchor_generator paddle.nn.functional.vision.anchor_generator -paddle.nn.layer.activation.ReLU paddle.nn.layer.ReLU +paddle.tensor.math.inverse paddle.inverse,paddle.tensor.inverse +paddle.tensor.linalg.bmm paddle.bmm,paddle.tensor.bmm +paddle.fluid.layers.selu paddle.nn.functional.selu,paddle.nn.functional.activation.selu +paddle.fluid.layers.rpn_target_assign paddle.nn.functional.rpn_target_assign,paddle.nn.functional.extension.rpn_target_assign +paddle.fluid.layers.pad_constant_like paddle.nn.functional.pad_constant_like,paddle.nn.functional.common.pad_constant_like +paddle.fluid.layers.logical_not paddle.logical_not,paddle.tensor.logical_not,paddle.tensor.logic.logical_not +paddle.fluid.layers.layer_norm paddle.static.nn.layer_norm +paddle.fluid.layers.group_norm paddle.static.nn.group_norm +paddle.fluid.layers.gather_nd paddle.gather_nd,paddle.tensor.gather_nd,paddle.tensor.manipulation.gather_nd +paddle.fluid.layers.batch_norm paddle.static.nn.batch_norm +paddle.fluid.framework.default_main_program paddle.static.default_main_program +paddle.fluid.dygraph.BilinearTensorProduct paddle.nn.BilinearTensorProduct,paddle.nn.layer.BilinearTensorProduct,paddle.nn.layer.common.BilinearTensorProduct +paddle.tensor.linalg.t paddle.t,paddle.tensor.t +paddle.tensor.creation.ones paddle.ones,paddle.tensor.ones +paddle.fluid.layers.data_norm paddle.static.nn.data_norm +paddle.tensor.stat.var paddle.var,paddle.tensor.var +paddle.fluid.layers.maxout paddle.nn.functional.maxout,paddle.nn.functional.activation.maxout +paddle.fluid.layers.logical_or paddle.logical_or,paddle.tensor.logical_or,paddle.tensor.logic.logical_or 
+paddle.fluid.layers.continuous_value_model paddle.nn.functional.continuous_value_model,paddle.nn.functional.extension.continuous_value_model +paddle.fluid.layers.topk paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk +paddle.fluid.layers.elementwise_div paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div +paddle.fluid.layers.acos paddle.acos,paddle.tensor.acos,paddle.tensor.math.acos +paddle.fluid.dygraph.base.no_grad paddle.no_grad,paddle.framework.no_grad +paddle.fluid.dygraph.ProgramTranslator paddle.jit.ProgramTranslator +paddle.tensor.search.where paddle.where,paddle.tensor.where +paddle.tensor.creation.zeros_like paddle.zeros_like,paddle.tensor.zeros_like +paddle.nn.layer.loss paddle.nn.loss +paddle.fluid.layers.target_assign paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign +paddle.tensor.manipulation.split paddle.split,paddle.tensor.split +paddle.tensor.creation.ones_like paddle.ones_like,paddle.tensor.ones_like +paddle.tensor.creation.full_like paddle.full_like,paddle.tensor.full_like +paddle.fluid.layers.roi_align paddle.nn.functional.roi_align,paddle.nn.functional.vision.roi_align +paddle.fluid.layers.retinanet_detection_output paddle.nn.functional.retinanet_detection_output,paddle.nn.functional.vision.retinanet_detection_output +paddle.fluid.layers.reshape paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape +paddle.fluid.layers.rank paddle.rank,paddle.tensor.rank,paddle.tensor.attribute.rank +paddle.fluid.layers.grid_sampler paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler +paddle.fluid.dygraph.jit.save paddle.jit.save +paddle.fluid.core.CUDAPinnedPlace paddle.CUDAPinnedPlace,paddle.framework.CUDAPinnedPlace +paddle.tensor.linalg.cholesky paddle.cholesky,paddle.tensor.cholesky +paddle.tensor.creation.meshgrid paddle.meshgrid,paddle.tensor.meshgrid +paddle.fluid.layers.sigmoid_cross_entropy_with_logits 
paddle.nn.functional.sigmoid_cross_entropy_with_logits,paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits +paddle.fluid.layers.pool3d paddle.nn.functional.pool3d,paddle.nn.functional.pooling.pool3d +paddle.fluid.layers.fill_constant paddle.fill_constant,paddle.tensor.fill_constant,paddle.tensor.creation.fill_constant +paddle.fluid.layers.expand paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand +paddle.fluid.layers.deformable_roi_pooling paddle.nn.functional.deformable_roi_pooling,paddle.nn.functional.vision.deformable_roi_pooling +paddle.fluid.initializer.Constant paddle.nn.initializer.Constant +paddle.fluid.dygraph.container.Sequential paddle.nn.Sequential +paddle.fluid.compiler.CompiledProgram paddle.static.CompiledProgram +paddle.tensor.search.index_select paddle.index_select,paddle.tensor.index_select +paddle.nn.layer.conv paddle.nn.conv +paddle.fluid.layers.switch_case paddle.nn.switch_case,paddle.nn.control_flow.switch_case +paddle.fluid.layers.sinh paddle.sinh,paddle.tensor.sinh,paddle.tensor.math.sinh +paddle.fluid.layers.shape paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape +paddle.fluid.layers.pool2d paddle.nn.functional.pool2d,paddle.nn.functional.pooling.pool2d +paddle.fluid.layers.multiclass_nms paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms +paddle.fluid.layers.log paddle.log,paddle.tensor.log,paddle.tensor.math.log +paddle.fluid.layers.increment paddle.increment,paddle.tensor.increment,paddle.tensor.math.increment +paddle.fluid.layers.fsp_matrix paddle.nn.functional.fsp_matrix,paddle.nn.functional.vision.fsp_matrix +paddle.fluid.layers.beam_search_decode paddle.nn.beam_search_decode,paddle.nn.decode.beam_search_decode +paddle.fluid.initializer.Xavier paddle.nn.initializer.Xavier +paddle.fluid.framework.in_dygraph_mode paddle.in_dynamic_mode +paddle.fluid.dygraph.io.TranslatedLayer paddle.jit.TranslatedLayer +paddle.fluid.layers.sign 
paddle.sign,paddle.tensor.sign,paddle.tensor.math.sign +paddle.fluid.layers.density_prior_box paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box +paddle.fluid.layers.control_flow.Print paddle.static.Print +paddle.nn.functional.activation.hsigmoid paddle.nn.functional.hsigmoid +paddle.fluid.layers.linspace paddle.linspace,paddle.tensor.linspace,paddle.tensor.creation.linspace +paddle.tensor.math.sum paddle.sum,paddle.tensor.sum +paddle.nn.functional.activation.sigmoid paddle.nn.functional.sigmoid +paddle.fluid.one_hot paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot +paddle.fluid.layers.softsign paddle.nn.functional.softsign,paddle.nn.functional.activation.softsign +paddle.fluid.framework.program_guard paddle.static.program_guard +paddle.fluid.dygraph.jit.TracedLayer paddle.jit.TracedLayer +paddle.tensor.random.rand paddle.rand,paddle.tensor.rand +paddle.tensor.manipulation.unbind paddle.unbind,paddle.tensor.unbind +paddle.nn.layer.loss.MarginRankingLoss paddle.nn.MarginRankingLoss,paddle.nn.layer.MarginRankingLoss +paddle.fluid.layers.similarity_focus paddle.nn.functional.similarity_focus,paddle.nn.functional.extension.similarity_focus +paddle.fluid.layers.assign paddle.nn.functional.assign,paddle.nn.functional.common.assign +paddle.fluid.layers.anchor_generator paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator +paddle.fluid.core.CUDAPlace paddle.CUDAPlace,paddle.framework.CUDAPlace +paddle.tensor.math.clamp paddle.clamp,paddle.tensor.clamp +paddle.tensor.linalg.dist paddle.dist,paddle.tensor.dist +paddle.fluid.layers.cosh paddle.cosh,paddle.tensor.cosh,paddle.tensor.math.cosh +paddle.fluid.core.CPUPlace paddle.CPUPlace,paddle.framework.CPUPlace +paddle.tensor.math.cumsum paddle.cumsum,paddle.tensor.cumsum +paddle.tensor.linalg.histogram paddle.histogram,paddle.tensor.histogram +paddle.nn.functional.common.interpolate paddle.nn.functional.interpolate 
+paddle.fluid.layers.roi_perspective_transform paddle.nn.functional.roi_perspective_transform,paddle.nn.functional.vision.roi_perspective_transform +paddle.fluid.layers.elementwise_floordiv paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv +paddle.fluid.framework.Program paddle.static.Program +paddle.fluid.executor.scope_guard paddle.static.scope_guard +paddle.fluid.executor.global_scope paddle.static.global_scope +paddle.fluid.dygraph.layers.Layer paddle.nn.Layer +paddle.tensor.random.randint paddle.randint,paddle.tensor.randint +paddle.tensor.math.multiply paddle.multiply,paddle.tensor.multiply +paddle.nn.layer.activation.HSigmoid paddle.nn.HSigmoid,paddle.nn.layer.HSigmoid +paddle.fluid.layers.yolo_box paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box +paddle.fluid.layers.cast paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast +paddle.fluid.framework.name_scope paddle.static.name_scope +paddle.fluid.executor.Executor paddle.static.Executor +paddle.tensor.manipulation.flatten paddle.flatten,paddle.tensor.flatten +paddle.nn.layer.activation.Sigmoid paddle.nn.Sigmoid,paddle.nn.layer.Sigmoid +paddle.fluid.layers.sampled_softmax_with_cross_entropy paddle.nn.functional.sampled_softmax_with_cross_entropy,paddle.nn.functional.loss.sampled_softmax_with_cross_entropy +paddle.fluid.layers.random_crop paddle.nn.functional.random_crop,paddle.nn.functional.extension.random_crop +paddle.fluid.layers.inverse_time_decay paddle.nn.functional.inverse_time_decay,paddle.nn.functional.learning_rate.inverse_time_decay +paddle.fluid.layers.detection_output paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output +paddle.fluid.layers.add_position_encoding paddle.nn.functional.add_position_encoding,paddle.nn.functional.extension.add_position_encoding +paddle.fluid.dygraph.Embedding paddle.nn.Embedding,paddle.nn.layer.Embedding,paddle.nn.layer.common.Embedding 
+paddle.fluid.layers.prior_box paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box +paddle.fluid.layers.nn.py_func paddle.static.py_func +paddle.fluid.layers.natural_exp_decay paddle.nn.functional.natural_exp_decay,paddle.nn.functional.learning_rate.natural_exp_decay +paddle.fluid.layers.exponential_decay paddle.nn.functional.exponential_decay,paddle.nn.functional.learning_rate.exponential_decay +paddle.fluid.initializer.Uniform paddle.nn.initializer.Uniform +paddle.fluid.save paddle.static.save,paddle.tensor.save,paddle.tensor.io.save +paddle.fluid.layers.polynomial_decay paddle.nn.functional.polynomial_decay,paddle.nn.functional.learning_rate.polynomial_decay +paddle.fluid.layers.polygon_box_transform paddle.nn.functional.polygon_box_transform,paddle.nn.functional.extension.polygon_box_transform +paddle.fluid.layers.hard_sigmoid paddle.nn.functional.hard_sigmoid,paddle.nn.functional.activation.hard_sigmoid +paddle.fluid.input.embedding paddle.static.nn.embedding +paddle.fluid.initializer.MSRA paddle.nn.initializer.MSRA +paddle.tensor.math.logsumexp paddle.logsumexp,paddle.tensor.logsumexp +paddle.tensor.math.elementwise_sum paddle.elementwise_sum,paddle.tensor.elementwise_sum +paddle.nn.layer.activation.ReLU paddle.nn.ReLU,paddle.nn.layer.ReLU +paddle.fluid.layers.unique_with_counts paddle.unique_with_counts,paddle.tensor.unique_with_counts,paddle.tensor.manipulation.unique_with_counts +paddle.fluid.layers.scatter paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter +paddle.fluid.layers.reduce_mean paddle.reduce_mean,paddle.tensor.reduce_mean,paddle.tensor.stat.reduce_mean +paddle.fluid.layers.psroi_pool paddle.nn.functional.psroi_pool,paddle.nn.functional.vision.psroi_pool +paddle.fluid.layers.prroi_pool paddle.nn.functional.prroi_pool,paddle.nn.functional.vision.prroi_pool +paddle.fluid.layers.pixel_shuffle paddle.nn.functional.pixel_shuffle,paddle.nn.functional.vision.pixel_shuffle +paddle.fluid.layers.piecewise_decay 
paddle.nn.functional.piecewise_decay,paddle.nn.functional.learning_rate.piecewise_decay +paddle.fluid.layers.is_empty paddle.is_empty,paddle.tensor.is_empty,paddle.tensor.logic.is_empty +paddle.fluid.layers.iou_similarity paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity +paddle.fluid.dygraph.parallel.ParallelEnv paddle.ParallelEnv,paddle.framework.ParallelEnv +paddle.fluid.layers.nce paddle.static.nn.nce +paddle.fluid.layers.logsigmoid paddle.nn.functional.logsigmoid,paddle.nn.functional.activation.logsigmoid +paddle.tensor.linalg.norm paddle.norm,paddle.tensor.norm +paddle.nn.layer.conv.Conv3D paddle.nn.Conv3D,paddle.nn.layer.Conv3D +paddle.fluid.layers.roi_pool paddle.nn.functional.roi_pool,paddle.nn.functional.vision.roi_pool +paddle.fluid.layers.pad paddle.nn.functional.pad,paddle.nn.functional.common.pad +paddle.fluid.layers.linear_lr_warmup paddle.nn.functional.linear_lr_warmup,paddle.nn.functional.learning_rate.linear_lr_warmup +paddle.nn.layer.conv.Conv2D paddle.nn.Conv2D,paddle.nn.layer.Conv2D +paddle.fluid.layers.hsigmoid paddle.static.nn.hsigmoid +paddle.fluid.layers.cosine_decay paddle.nn.functional.cosine_decay,paddle.nn.functional.learning_rate.cosine_decay +paddle.fluid.layers.cond paddle.nn.cond,paddle.nn.control_flow.cond paddle.fluid.dygraph.container.ParameterList paddle.nn.ParameterList -paddle.fluid.initializer.Bilinear paddle.nn.initializer.Bilinear -paddle.tensor.manipulation.stack paddle.stack -paddle.fluid.layers.scatter_nd paddle.tensor.scatter_nd -paddle.fluid.layers.roi_pool paddle.nn.functional.roi_pool -paddle.fluid.layers.prior_box paddle.nn.functional.vision.prior_box -paddle.tensor.math.min paddle.tensor.min -paddle.tensor.linalg.matmul paddle.matmul -paddle.nn.layer.loss paddle.nn.loss -paddle.tensor.math.floor paddle.tensor.floor -paddle.tensor.math.elementwise_add paddle.tensor.elementwise_add -paddle.tensor.math.div paddle.div -paddle.tensor.manipulation.concat paddle.concat 
-paddle.fluid.layers.adaptive_pool2d paddle.nn.functional.pooling.adaptive_pool2d -paddle.fluid.dygraph.Linear paddle.nn.layer.common.Linear -paddle.nn.layer.activation.LeakyReLU paddle.nn.LeakyReLU -paddle.fluid.layers.yolo_box paddle.nn.functional.vision.yolo_box -paddle.fluid.layers.while_loop paddle.nn.while_loop -paddle.tensor.manipulation.unbind paddle.unbind -paddle.fluid.layers.scatter_nd_add paddle.tensor.scatter_nd_add -paddle.fluid.layers.roi_perspective_transform paddle.nn.functional.roi_perspective_transform -paddle.fluid.layers.random_crop paddle.nn.functional.random_crop -paddle.fluid.layers.logical_and paddle.logical_and -paddle.tensor.math.log paddle.log -paddle.fluid.layers.gather_nd paddle.tensor.gather_nd -paddle.tensor.creation.eye paddle.tensor.eye -paddle.fluid.data paddle.nn.input.data -paddle.tensor.linalg.cross paddle.tensor.cross -paddle.fluid.dygraph.SpectralNorm paddle.nn.SpectralNorm -paddle.nn.layer.activation.LogSoftmax paddle.nn.LogSoftmax -paddle.nn.layer.norm.InstanceNorm paddle.nn.InstanceNorm -paddle.nn.layer.conv.Conv3DTranspose paddle.nn.layer.Conv3DTranspose -paddle.nn.layer.conv.Conv2DTranspose paddle.nn.layer.Conv2DTranspose -paddle.nn.layer.conv.Conv2D paddle.nn.Conv2D -paddle.fluid.layers.unique_with_counts paddle.tensor.manipulation.unique_with_counts -paddle.fluid.layers.transpose paddle.tensor.linalg.transpose -paddle.fluid.layers.softmax_with_cross_entropy paddle.nn.functional.softmax_with_cross_entropy -paddle.fluid.layers.reduce_mean paddle.tensor.stat.reduce_mean -paddle.tensor.logic.greater_equal paddle.greater_equal -paddle.tensor.creation.arange paddle.tensor.arange -paddle.framework.Print paddle.Print -paddle.framework.BuildStrategy paddle.BuildStrategy -paddle.tensor.stat.var paddle.tensor.var -paddle.fluid.layers.thresholded_relu paddle.nn.functional.activation.thresholded_relu -paddle.fluid.layers.sigmoid_cross_entropy_with_logits paddle.nn.functional.loss.sigmoid_cross_entropy_with_logits 
-paddle.fluid.layers.rank paddle.tensor.attribute.rank -paddle.fluid.layers.image_resize_short paddle.nn.functional.image_resize_short -paddle.fluid.layers.has_nan paddle.has_nan -paddle.fluid.layers.expand paddle.tensor.manipulation.expand -paddle.fluid.layers.erf paddle.tensor.math.erf -paddle.tensor.logic.equal paddle.tensor.equal -paddle.fluid.layers.cast paddle.tensor.manipulation.cast -paddle.fluid.initializer.Normal paddle.nn.initializer.Normal +paddle.fluid.dygraph.base.grad paddle.grad,paddle.framework.grad +paddle.fluid.dygraph.Flatten paddle.nn.Flatten,paddle.nn.layer.Flatten,paddle.nn.layer.common.Flatten +paddle.fluid.layers.tanh paddle.tanh,paddle.tensor.tanh,paddle.tensor.math.tanh +paddle.fluid.layers.reduce_sum paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum +paddle.fluid.layers.label_smooth paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth +paddle.fluid.layers.softmax_with_cross_entropy paddle.nn.functional.softmax_with_cross_entropy,paddle.nn.functional.loss.softmax_with_cross_entropy +paddle.fluid.layers.noam_decay paddle.nn.functional.noam_decay,paddle.nn.functional.learning_rate.noam_decay +paddle.fluid.layers.dropout paddle.nn.functional.dropout,paddle.nn.functional.common.dropout +paddle.tensor.manipulation.gather paddle.gather,paddle.tensor.gather +paddle.nn.layer.activation.LeakyReLU paddle.nn.LeakyReLU,paddle.nn.layer.LeakyReLU +paddle.fluid.layers.isfinite paddle.isfinite,paddle.tensor.isfinite,paddle.tensor.logic.isfinite +paddle.fluid.layers.elementwise_mul paddle.tensor.math.elementwise_mul +paddle.fluid.layers.crf_decoding paddle.static.nn.crf_decoding +paddle.fluid.dygraph.learning_rate_scheduler.InverseTimeDecay paddle.InverseTimeDecay,paddle.framework.InverseTimeDecay +paddle.fluid.dygraph.learning_rate_scheduler.ExponentialDecay paddle.ExponentialDecay,paddle.framework.ExponentialDecay +paddle.tensor.math.max paddle.max,paddle.tensor.max 
+paddle.fluid.layers.teacher_student_sigmoid_loss paddle.nn.functional.teacher_student_sigmoid_loss,paddle.nn.functional.loss.teacher_student_sigmoid_loss +paddle.fluid.dygraph.parallel.DataParallel paddle.DataParallel,paddle.framework.DataParallel +paddle.fluid.dygraph.learning_rate_scheduler.PolynomialDecay paddle.PolynomialDecay,paddle.framework.PolynomialDecay +paddle.fluid.dygraph.learning_rate_scheduler.NaturalExpDecay paddle.NaturalExpDecay,paddle.framework.NaturalExpDecay paddle.fluid.dygraph.container.LayerList paddle.nn.LayerList -paddle.fluid.layers.unique paddle.tensor.manipulation.unique -paddle.tensor.math.trace paddle.trace -paddle.fluid.layers.reshape paddle.tensor.reshape -paddle.fluid.layers.lrn paddle.nn.functional.norm.lrn -paddle.fluid.layers.isfinite paddle.isfinite -paddle.tensor.search.index_select paddle.index_select -paddle.fluid.layers.gelu paddle.nn.functional.activation.gelu -paddle.fluid.layers.fill_constant paddle.fill_constant -paddle.fluid.layers.expand_as paddle.tensor.manipulation.expand_as -paddle.tensor.math.elementwise_sum paddle.tensor.elementwise_sum -paddle.fluid.initializer.MSRA paddle.nn.initializer.MSRA -paddle.fluid.dygraph.LayerNorm paddle.nn.LayerNorm -paddle.fluid.dygraph.GroupNorm paddle.nn.GroupNorm -paddle.fluid.dygraph.BatchNorm paddle.nn.BatchNorm -paddle.tensor.math.scale paddle.tensor.scale -paddle.tensor.math.reciprocal paddle.reciprocal -paddle.fluid.layers.polygon_box_transform paddle.nn.functional.polygon_box_transform -paddle.fluid.layers.pad2d paddle.nn.functional.common.pad2d -paddle.tensor.math.max paddle.max -paddle.fluid.layers.logical_or paddle.tensor.logic.logical_or -paddle.tensor.logic.less_equal paddle.less_equal -paddle.tensor.math.cosh paddle.cosh -paddle.tensor.math.ceil paddle.ceil -paddle.nn.layer.activation.ReLU paddle.nn.ReLU -paddle.fluid.layers.shape paddle.tensor.shape -paddle.tensor.math.pow paddle.tensor.pow -paddle.tensor.creation.ones paddle.tensor.ones 
-paddle.tensor.logic.not_equal paddle.not_equal -paddle.fluid.layers.anchor_generator paddle.nn.functional.anchor_generator -paddle.tensor.logic.allclose paddle.tensor.allclose -paddle.fluid.layers.warpctc paddle.nn.functional.extension.warpctc -paddle.fluid.layers.scatter_nd paddle.scatter_nd -paddle.fluid.layers.reduce_mean paddle.tensor.reduce_mean -paddle.fluid.layers.prior_box paddle.nn.functional.prior_box -paddle.tensor.math.min paddle.min -paddle.fluid.layers.log paddle.tensor.math.log -paddle.fluid.layers.hard_sigmoid paddle.nn.functional.hard_sigmoid -paddle.tensor.math.floor paddle.floor -paddle.tensor.math.elementwise_add paddle.elementwise_add -paddle.fluid.data paddle.nn.data -paddle.fluid.dygraph.Linear paddle.nn.layer.Linear -paddle.nn.layer.conv.Conv3DTranspose paddle.nn.Conv3DTranspose -paddle.nn.layer.conv.Conv2DTranspose paddle.nn.Conv2DTranspose -paddle.fluid.layers.yolo_box paddle.nn.functional.yolo_box -paddle.fluid.layers.sigmoid_cross_entropy_with_logits paddle.nn.functional.sigmoid_cross_entropy_with_logits -paddle.fluid.layers.scatter_nd_add paddle.scatter_nd_add -paddle.fluid.layers.relu6 paddle.nn.functional.activation.relu6 -paddle.fluid.layers.gather_nd paddle.gather_nd -paddle.tensor.creation.eye paddle.eye -paddle.tensor.linalg.cross paddle.cross -paddle.fluid.layers.adaptive_pool2d paddle.nn.functional.adaptive_pool2d -paddle.fluid.layers.transpose paddle.tensor.transpose -paddle.tensor.stat.std paddle.tensor.std -paddle.tensor.math.sinh paddle.tensor.sinh -paddle.fluid.save paddle.tensor.io.save -paddle.fluid.layers.retinanet_target_assign paddle.nn.functional.vision.retinanet_target_assign -paddle.tensor.math.reduce_sum paddle.tensor.reduce_sum -paddle.fluid.layers.pool2d paddle.nn.functional.pooling.pool2d -paddle.tensor.math.mm paddle.tensor.mm -paddle.fluid.layers.mean paddle.tensor.stat.mean -paddle.fluid.layers.lrn paddle.nn.functional.lrn -paddle.fluid.layers.logsigmoid paddle.nn.functional.logsigmoid 
-paddle.fluid.layers.leaky_relu paddle.nn.functional.activation.leaky_relu -paddle.tensor.linalg.dist paddle.tensor.dist -paddle.tensor.creation.arange paddle.arange -paddle.fluid.dygraph.layers.Layer paddle.nn.Layer -paddle.tensor.stat.var paddle.var -paddle.fluid.layers.soft_relu paddle.nn.functional.activation.soft_relu -paddle.nn.functional.extension.row_conv paddle.nn.functional.row_conv -paddle.fluid.layers.inverse_time_decay paddle.nn.functional.learning_rate.inverse_time_decay -paddle.fluid.layers.fsp_matrix paddle.nn.functional.vision.fsp_matrix -paddle.tensor.logic.equal paddle.equal -paddle.fluid.layers.cross_entropy paddle.nn.functional.loss.cross_entropy -paddle.fluid.layers.box_decoder_and_assign paddle.nn.functional.vision.box_decoder_and_assign -paddle.fluid.layers.stanh paddle.tensor.math.stanh -paddle.fluid.layers.reshape paddle.reshape -paddle.fluid.layers.natural_exp_decay paddle.nn.functional.learning_rate.natural_exp_decay -paddle.fluid.layers.logical_or paddle.tensor.logical_or -paddle.tensor.math.log1p paddle.tensor.log1p -paddle.nn.functional.activation.hsigmoid paddle.nn.functional.hsigmoid -paddle.fluid.layers.exponential_decay paddle.nn.functional.learning_rate.exponential_decay -paddle.tensor.math.elementwise_sum paddle.elementwise_sum -paddle.fluid.layers.sinh paddle.tensor.math.sinh -paddle.fluid.layers.similarity_focus paddle.nn.functional.extension.similarity_focus -paddle.nn.functional.activation.sigmoid paddle.nn.functional.sigmoid -paddle.tensor.math.scale paddle.scale -paddle.fluid.save paddle.tensor.save -paddle.fluid.layers.rpn_target_assign paddle.nn.functional.extension.rpn_target_assign -paddle.fluid.layers.polynomial_decay paddle.nn.functional.learning_rate.polynomial_decay -paddle.fluid.layers.pad2d paddle.nn.functional.pad2d -paddle.fluid.layers.elu paddle.nn.functional.activation.elu -paddle.framework.WeightNormParamAttr paddle.WeightNormParamAttr -paddle.tensor.creation.zeros paddle.tensor.zeros 
-paddle.fluid.layers.shape paddle.shape -paddle.fluid.layers.rank paddle.tensor.rank -paddle.tensor.math.pow paddle.pow -paddle.fluid.layers.piecewise_decay paddle.nn.functional.learning_rate.piecewise_decay -paddle.fluid.layers.pad_constant_like paddle.nn.functional.common.pad_constant_like -paddle.tensor.creation.ones paddle.ones -paddle.fluid.layers.create_tensor paddle.tensor.creation.create_tensor -paddle.tensor.logic.allclose paddle.allclose -paddle.fluid.dygraph.Linear paddle.nn.Linear -paddle.fluid.layers.thresholded_relu paddle.nn.functional.thresholded_relu -paddle.fluid.layers.space_to_depth paddle.nn.functional.vision.space_to_depth -paddle.fluid.layers.reduce_mean paddle.reduce_mean -paddle.fluid.layers.mean paddle.tensor.mean -paddle.tensor.creation.full paddle.tensor.full -paddle.fluid.layers.brelu paddle.nn.functional.activation.brelu -paddle.tensor.math.abs paddle.tensor.abs -paddle.fluid.layers.abs paddle.tensor.math.abs -paddle.framework.Variable paddle.Variable -paddle.nn.layer.loss.CrossEntropyLoss paddle.nn.layer.CrossEntropyLoss -paddle.fluid.layers.unique_with_counts paddle.tensor.unique_with_counts -paddle.fluid.layers.switch_case paddle.nn.control_flow.switch_case -paddle.framework.gradients paddle.gradients -paddle.fluid.layers.gelu paddle.nn.functional.gelu -paddle.fluid.layers.exp paddle.tensor.math.exp -paddle.tensor.math.elementwise_sub paddle.tensor.elementwise_sub -paddle.fluid.layers.cross_entropy paddle.nn.functional.cross_entropy -paddle.fluid.layers.crop_tensor paddle.tensor.creation.crop_tensor -paddle.tensor.linalg.cholesky paddle.tensor.cholesky -paddle.fluid.layers.transpose paddle.transpose -paddle.fluid.layers.target_assign paddle.nn.functional.extension.target_assign -paddle.tensor.stat.std paddle.std -paddle.tensor.math.square paddle.tensor.square -paddle.tensor.math.sinh paddle.sinh -paddle.fluid.layers.selu paddle.nn.functional.activation.selu -paddle.fluid.layers.roi_align paddle.nn.functional.vision.roi_align 
-paddle.fluid.layers.retinanet_target_assign paddle.nn.functional.retinanet_target_assign -paddle.tensor.math.reduce_sum paddle.reduce_sum -paddle.tensor.math.logsumexp paddle.tensor.logsumexp -paddle.fluid.layers.hard_swish paddle.nn.functional.activation.hard_swish -paddle.fluid.layers.expand paddle.tensor.expand -paddle.tensor.linalg.dist paddle.dist -paddle.fluid.layers.cosine_decay paddle.nn.functional.learning_rate.cosine_decay -paddle.fluid.layers.cast paddle.tensor.cast -paddle.fluid.layers.affine_grid paddle.nn.functional.vision.affine_grid -paddle.nn.layer.activation.HSigmoid paddle.nn.layer.HSigmoid -paddle.fluid.layers.warpctc paddle.nn.functional.warpctc -paddle.tensor.manipulation.unsqueeze paddle.tensor.unsqueeze -paddle.fluid.layers.unique paddle.tensor.unique -paddle.tensor.math.stanh paddle.tensor.stanh -paddle.framework.program_guard paddle.program_guard -paddle.fluid.layers.pool2d paddle.nn.functional.pool2d -paddle.fluid.layers.fsp_matrix paddle.nn.functional.fsp_matrix -paddle.fluid.layers.expand_as paddle.tensor.expand_as -paddle.fluid.layers.cosh paddle.tensor.math.cosh -paddle.fluid.layers.box_decoder_and_assign paddle.nn.functional.box_decoder_and_assign -paddle.framework.append_backward paddle.append_backward -paddle.nn.layer.activation.Sigmoid paddle.nn.layer.Sigmoid -paddle.tensor.math.tanh paddle.tensor.tanh -paddle.fluid.layers.noam_decay paddle.nn.functional.learning_rate.noam_decay -paddle.fluid.layers.logical_or paddle.logical_or -paddle.tensor.math.log1p paddle.log1p -paddle.fluid.layers.floor paddle.tensor.math.floor -paddle.fluid.initializer.Uniform paddle.nn.initializer.Uniform -paddle.tensor.manipulation.squeeze paddle.tensor.squeeze -paddle.fluid.layers.softplus paddle.nn.functional.activation.softplus -paddle.framework.scope_guard paddle.scope_guard -paddle.fluid.save paddle.save -paddle.fluid.layers.relu6 paddle.nn.functional.relu6 -paddle.fluid.layers.multiclass_nms paddle.nn.functional.extension.multiclass_nms 
-paddle.fluid.layers.label_smooth paddle.nn.functional.common.label_smooth -paddle.fluid.layers.is_empty paddle.tensor.logic.is_empty -paddle.fluid.dygraph.BilinearTensorProduct paddle.nn.layer.common.BilinearTensorProduct -paddle.tensor.creation.zeros paddle.zeros -paddle.fluid.layers.softsign paddle.nn.functional.activation.softsign -paddle.fluid.layers.rank paddle.rank -paddle.tensor.random.rand paddle.tensor.rand -paddle.fluid.layers.pad_constant_like paddle.nn.functional.pad_constant_like -paddle.fluid.layers.linear_lr_warmup paddle.nn.functional.learning_rate.linear_lr_warmup -paddle.fluid.layers.leaky_relu paddle.nn.functional.leaky_relu -paddle.nn.layer.loss.CrossEntropyLoss paddle.nn.CrossEntropyLoss +paddle.tensor.math.mm paddle.mm,paddle.tensor.mm +paddle.tensor.creation.triu paddle.triu,paddle.tensor.triu +paddle.fluid.param_attr.WeightNormParamAttr paddle.static.WeightNormParamAttr +paddle.fluid.layers.stanh paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh +paddle.fluid.layers.resize_trilinear paddle.nn.functional.resize_trilinear,paddle.nn.functional.vision.resize_trilinear +paddle.fluid.dygraph.learning_rate_scheduler.PiecewiseDecay paddle.PiecewiseDecay,paddle.framework.PiecewiseDecay +paddle.tensor.math.min paddle.min,paddle.tensor.min +paddle.tensor.manipulation.unsqueeze paddle.unsqueeze,paddle.tensor.unsqueeze +paddle.tensor.manipulation.stack paddle.stack,paddle.tensor.stack +paddle.tensor.creation.full paddle.full,paddle.tensor.full +paddle.fluid.layers.resize_bilinear paddle.nn.functional.resize_bilinear,paddle.nn.functional.vision.resize_bilinear +paddle.fluid.layers.image_resize_short paddle.nn.functional.image_resize_short,paddle.nn.functional.vision.image_resize_short +paddle.fluid.layers.gather_tree paddle.nn.gather_tree,paddle.nn.decode.gather_tree +paddle.fluid.layers.beam_search paddle.nn.beam_search,paddle.nn.decode.beam_search +paddle.fluid.backward.append_backward paddle.static.append_backward +paddle.tensor.math.trace 
paddle.trace,paddle.tensor.trace +paddle.nn.functional.loss.margin_ranking_loss paddle.nn.functional.margin_ranking_loss +paddle.fluid.layers.resize_nearest paddle.nn.functional.resize_nearest,paddle.nn.functional.vision.resize_nearest +paddle.fluid.layers.exp paddle.exp,paddle.tensor.exp,paddle.tensor.math.exp +paddle.fluid.layers.elementwise_sub paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub +paddle.fluid.layers.ceil paddle.ceil,paddle.tensor.ceil,paddle.tensor.math.ceil +paddle.fluid.io.shuffle paddle.shuffle,paddle.tensor.shuffle,paddle.tensor.random.shuffle +paddle.fluid.dygraph.jit.load paddle.jit.load +paddle.tensor.manipulation.squeeze paddle.squeeze,paddle.tensor.squeeze +paddle.tensor.linalg.cross paddle.cross,paddle.tensor.cross +paddle.nn.functional.conv.conv3d_transpose paddle.nn.functional.conv3d_transpose +paddle.nn.functional.conv.conv2d_transpose paddle.nn.functional.conv2d_transpose +paddle.fluid.layers.square paddle.square,paddle.tensor.square,paddle.tensor.math.square +paddle.fluid.layers.hard_swish paddle.nn.functional.hard_swish,paddle.nn.functional.activation.hard_swish +paddle.fluid.layers.case paddle.nn.case,paddle.nn.control_flow.case +paddle.fluid.layers.bipartite_match paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match +paddle.fluid.initializer.Bilinear paddle.nn.initializer.Bilinear +paddle.fluid.dygraph.learning_rate_scheduler.CosineDecay paddle.CosineDecay,paddle.framework.CosineDecay +paddle.tensor.random.randperm paddle.randperm,paddle.tensor.randperm +paddle.tensor.creation.eye paddle.eye,paddle.tensor.eye +paddle.nn.layer.loss.CrossEntropyLoss paddle.nn.CrossEntropyLoss,paddle.nn.layer.CrossEntropyLoss +paddle.fluid.layers.thresholded_relu paddle.nn.functional.thresholded_relu,paddle.nn.functional.activation.thresholded_relu +paddle.fluid.layers.filter_by_instag paddle.nn.functional.filter_by_instag,paddle.nn.functional.extension.filter_by_instag 
+paddle.fluid.layers.box_clip paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip +paddle.tensor.logic.greater_than paddle.greater_than,paddle.tensor.greater_than +paddle.fluid.layers.sin paddle.sin,paddle.tensor.sin,paddle.tensor.math.sin +paddle.fluid.layers.has_nan paddle.has_nan,paddle.tensor.has_nan,paddle.tensor.search.has_nan +paddle.fluid.dygraph.learning_rate_scheduler.NoamDecay paddle.NoamDecay,paddle.framework.NoamDecay +paddle.tensor.stat.std paddle.std,paddle.tensor.std +paddle.tensor.math.log1p paddle.log1p,paddle.tensor.log1p +paddle.tensor.math.kron paddle.kron,paddle.tensor.kron +paddle.nn.layer.loss.L1Loss paddle.nn.L1Loss,paddle.nn.layer.L1Loss +paddle.fluid.layers.unstack paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack +paddle.fluid.layers.strided_slice paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice +paddle.fluid.clip.GradientClipByGlobalNorm paddle.nn.GradientClipByGlobalNorm,paddle.nn.clip.GradientClipByGlobalNorm +paddle.tensor.logic.less_than paddle.less_than,paddle.tensor.less_than +paddle.fluid.layers.swish paddle.nn.functional.swish,paddle.nn.functional.activation.swish +paddle.tensor.manipulation.concat paddle.concat,paddle.tensor.concat +paddle.tensor.creation.tril paddle.tril,paddle.tensor.tril +paddle.nn.layer.norm.InstanceNorm paddle.nn.InstanceNorm,paddle.nn.layer.InstanceNorm +paddle.nn.layer.extension.RowConv paddle.nn.RowConv,paddle.nn.layer.RowConv +paddle.nn.layer.conv.Conv3DTranspose paddle.nn.Conv3DTranspose,paddle.nn.layer.Conv3DTranspose +paddle.nn.layer.conv.Conv2DTranspose paddle.nn.Conv2DTranspose,paddle.nn.layer.Conv2DTranspose +paddle.fluid.layers.sigmoid_focal_loss paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss +paddle.fluid.layers.reduce_any paddle.reduce_any,paddle.tensor.reduce_any,paddle.tensor.logic.reduce_any +paddle.fluid.layers.elu paddle.nn.functional.elu,paddle.nn.functional.activation.elu 
+paddle.fluid.layers.conv3d_transpose paddle.static.nn.conv3d_transpose +paddle.fluid.layers.conv2d_transpose paddle.static.nn.conv2d_transpose +paddle.fluid.dygraph.base.to_variable paddle.to_variable,paddle.framework.to_variable +paddle.fluid.dygraph.Linear paddle.nn.Linear,paddle.nn.layer.Linear,paddle.nn.layer.common.Linear +paddle.fluid.backward.gradients paddle.static.gradients +paddle.tensor.logic.greater_equal paddle.greater_equal,paddle.tensor.greater_equal +paddle.fluid.param_attr.ParamAttr paddle.ParamAttr,paddle.framework.ParamAttr +paddle.fluid.layers.square_error_cost paddle.nn.functional.square_error_cost,paddle.nn.functional.loss.square_error_cost +paddle.fluid.layers.space_to_depth paddle.nn.functional.space_to_depth,paddle.nn.functional.vision.space_to_depth +paddle.fluid.layers.leaky_relu paddle.nn.functional.leaky_relu,paddle.nn.functional.activation.leaky_relu +paddle.fluid.layers.cross_entropy paddle.nn.functional.cross_entropy,paddle.nn.functional.loss.cross_entropy +paddle.fluid.layers.cos paddle.cos,paddle.tensor.cos,paddle.tensor.math.cos +paddle.tensor.search.index_sample paddle.index_sample,paddle.tensor.index_sample +paddle.nn.functional.extension.row_conv paddle.nn.functional.row_conv +paddle.nn.functional.activation.hardshrink paddle.nn.functional.hardshrink +paddle.fluid.layers.tensor.create_parameter paddle.create_parameter,paddle.framework.create_parameter +paddle.fluid.layers.soft_relu paddle.nn.functional.soft_relu,paddle.nn.functional.activation.soft_relu +paddle.fluid.layers.reduce_max paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max +paddle.fluid.layers.expand_as paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as +paddle.fluid.layers.atan paddle.atan,paddle.tensor.atan,paddle.tensor.math.atan +paddle.fluid.io.load paddle.static.load,paddle.tensor.load,paddle.tensor.io.load +paddle.tensor.search.argsort paddle.argsort,paddle.tensor.argsort +paddle.fluid.layers.round 
paddle.round,paddle.tensor.round,paddle.tensor.math.round +paddle.fluid.layers.elementwise_mod paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod +paddle.fluid.dygraph.Pool2D paddle.nn.Pool2D,paddle.nn.layer.Pool2D,paddle.nn.layer.common.Pool2D +paddle.tensor.logic.less_equal paddle.less_equal,paddle.tensor.less_equal +paddle.nn.layer.distance.PairwiseDistance paddle.nn.PairwiseDistance,paddle.nn.layer.PairwiseDistance +paddle.nn.layer.common.UpSample paddle.nn.UpSample,paddle.nn.layer.UpSample +paddle.nn.functional.loss.nll_loss paddle.nn.functional.nll_loss +paddle.fluid.layers.reduce_min paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min +paddle.fluid.dygraph.parallel.prepare_context paddle.prepare_context,paddle.framework.prepare_context +paddle.tensor.logic.not_equal paddle.not_equal,paddle.tensor.not_equal +paddle.nn.functional.loss.l1_loss paddle.nn.functional.l1_loss +paddle.fluid.layers.slice paddle.slice,paddle.tensor.slice,paddle.tensor.manipulation.slice +paddle.fluid.layers.scale paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale +paddle.tensor.stat.mean paddle.mean,paddle.tensor.mean +paddle.tensor.math.pow paddle.pow,paddle.tensor.pow +paddle.fluid.layers.deformable_conv paddle.static.nn.deformable_conv +paddle.fluid.framework.Variable paddle.Variable,paddle.framework.Variable +paddle.fluid.dygraph.BatchNorm paddle.nn.BatchNorm,paddle.nn.layer.BatchNorm,paddle.nn.layer.norm.BatchNorm +paddle.tensor.random.randn paddle.randn,paddle.tensor.randn +paddle.fluid.layers.yolov3_loss paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss +paddle.fluid.layers.unfold paddle.nn.functional.unfold,paddle.nn.functional.common.unfold +paddle.fluid.layers.transpose paddle.transpose,paddle.tensor.transpose,paddle.tensor.linalg.transpose,paddle.tensor.manipulation.transpose +paddle.fluid.layers.generate_proposal_labels 
paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels +paddle.fluid.layers.center_loss paddle.nn.functional.center_loss,paddle.nn.functional.loss.center_loss +paddle.fluid.dygraph.SpectralNorm paddle.nn.SpectralNorm,paddle.nn.layer.SpectralNorm,paddle.nn.layer.norm.SpectralNorm +paddle.fluid.layers.npair_loss paddle.nn.functional.npair_loss,paddle.nn.functional.loss.npair_loss +paddle.fluid.layers.kldiv_loss paddle.nn.functional.kldiv_loss,paddle.nn.functional.loss.kldiv_loss +paddle.fluid.layers.huber_loss paddle.nn.functional.huber_loss,paddle.nn.functional.loss.huber_loss +paddle.fluid.dygraph.checkpoint.save_dygraph paddle.save,paddle.framework.save +paddle.fluid.dygraph.checkpoint.load_dygraph paddle.load,paddle.framework.load +paddle.tensor.logic.equal paddle.equal,paddle.tensor.equal +paddle.nn.layer.common.Pad2D paddle.nn.Pad2D,paddle.nn.layer.Pad2D +paddle.nn.functional.activation.log_softmax paddle.nn.functional.log_softmax +paddle.fluid.layers.shuffle_channel paddle.nn.functional.shuffle_channel,paddle.nn.functional.vision.shuffle_channel +paddle.fluid.layers.rank_loss paddle.nn.functional.rank_loss,paddle.nn.functional.loss.rank_loss +paddle.fluid.layers.gelu paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu +paddle.fluid.layers.erf paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf +paddle.fluid.layers.dice_loss paddle.nn.functional.dice_loss,paddle.nn.functional.loss.dice_loss +paddle.fluid.layers.diag paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag +paddle.fluid.layers.create_parameter paddle.static.nn.create_parameter +paddle.fluid.layers.brelu paddle.nn.functional.brelu,paddle.nn.functional.activation.brelu +paddle.fluid.compiler.ExecutionStrategy paddle.static.ExecutionStrategy +paddle.tensor.search.nonzero paddle.nonzero,paddle.tensor.nonzero +paddle.tensor.search.argmax paddle.argmax,paddle.tensor.argmax 
+paddle.tensor.creation.arange paddle.arange,paddle.tensor.arange +paddle.nn.functional.activation.relu paddle.nn.functional.relu +paddle.fluid.layers.ssd_loss paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss +paddle.fluid.layers.mse_loss paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss +paddle.fluid.layers.log_loss paddle.nn.functional.log_loss,paddle.nn.functional.loss.log_loss +paddle.fluid.layers.image_resize paddle.nn.functional.image_resize,paddle.nn.functional.vision.image_resize +paddle.fluid.layers.bpr_loss paddle.nn.functional.bpr_loss,paddle.nn.functional.loss.bpr_loss +paddle.fluid.layers.affine_channel paddle.nn.functional.affine_channel,paddle.nn.functional.vision.affine_channel +paddle.fluid.layers.generate_mask_labels paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels +paddle.fluid.dygraph.base.disable_dygraph paddle.enable_static +paddle.fluid.data paddle.nn.data,paddle.nn.input.data +paddle.tensor.math.minimum paddle.minimum,paddle.tensor.minimum +paddle.tensor.linalg.matmul paddle.matmul,paddle.tensor.matmul +paddle.nn.functional.conv.conv3d paddle.nn.functional.conv3d +paddle.fluid.layers.sqrt paddle.sqrt,paddle.tensor.sqrt,paddle.tensor.math.sqrt +paddle.fluid.layers.pad2d paddle.nn.functional.pad2d,paddle.nn.functional.common.pad2d +paddle.fluid.layers.edit_distance paddle.nn.functional.edit_distance,paddle.nn.functional.loss.edit_distance +paddle.fluid.dygraph.base.enable_dygraph paddle.disable_static +paddle.tensor.search.sort paddle.sort,paddle.tensor.sort +paddle.nn.functional.conv.conv2d paddle.nn.functional.conv2d +paddle.nn.functional.activation.softmax paddle.nn.functional.softmax +paddle.fluid.layers.tanh_shrink paddle.nn.functional.tanh_shrink,paddle.nn.functional.activation.tanh_shrink +paddle.fluid.layers.row_conv paddle.static.nn.row_conv +paddle.fluid.layers.relu6 paddle.nn.functional.relu6,paddle.nn.functional.activation.relu6 
+paddle.fluid.layers.elementwise_pow paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow +paddle.fluid.layers.argmin paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin +paddle.fluid.dygraph.jit.declarative paddle.jit.to_static +paddle.fluid.compiler.BuildStrategy paddle.static.BuildStrategy +paddle.tensor.manipulation.roll paddle.roll,paddle.tensor.roll +paddle.fluid.layers.temporal_shift paddle.nn.functional.temporal_shift,paddle.nn.functional.extension.temporal_shift +paddle.fluid.layers.sums paddle.sums,paddle.tensor.sums,paddle.tensor.math.sums +paddle.fluid.layers.softshrink paddle.nn.functional.softshrink,paddle.nn.functional.activation.softshrink +paddle.fluid.layers.clip paddle.nn.clip,paddle.nn.clip.clip +paddle.fluid.dygraph.GroupNorm paddle.nn.GroupNorm,paddle.nn.layer.GroupNorm,paddle.nn.layer.norm.GroupNorm +paddle.tensor.math.addmm paddle.addmm,paddle.tensor.addmm +paddle.nn.layer.activation.LogSoftmax paddle.nn.LogSoftmax,paddle.nn.layer.LogSoftmax +paddle.framework.BackwardStrategy paddle.BackwardStrategy +paddle.fluid.layers.while_loop paddle.nn.while_loop,paddle.nn.control_flow.while_loop +paddle.fluid.layers.rsqrt paddle.rsqrt,paddle.tensor.rsqrt,paddle.tensor.math.rsqrt +paddle.fluid.layers.reduce_all paddle.reduce_all,paddle.tensor.reduce_all,paddle.tensor.logic.reduce_all +paddle.nn.layer.loss.MSELoss paddle.nn.MSELoss,paddle.nn.layer.MSELoss +paddle.nn.layer.loss.BCELoss paddle.nn.BCELoss,paddle.nn.layer.BCELoss +paddle.fluid.layers.unique paddle.unique,paddle.tensor.unique,paddle.tensor.manipulation.unique +paddle.fluid.layers.distribute_fpn_proposals paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals +paddle.fluid.layers.abs paddle.abs,paddle.tensor.abs,paddle.tensor.math.abs +paddle.fluid.dygraph.LayerNorm paddle.nn.LayerNorm,paddle.nn.layer.LayerNorm,paddle.nn.layer.norm.LayerNorm +paddle.fluid.clip.GradientClipByNorm 
paddle.nn.GradientClipByNorm,paddle.nn.clip.GradientClipByNorm +paddle.tensor.linalg.dot paddle.dot,paddle.tensor.dot +paddle.fluid.layers.fc paddle.static.nn.fc +paddle.fluid.layers.conv3d paddle.static.nn.conv3d +paddle.tensor.math.add paddle.add,paddle.tensor.add +paddle.fluid.layers.shard_index paddle.shard_index,paddle.tensor.shard_index,paddle.tensor.manipulation.shard_index +paddle.fluid.layers.reduce_prod paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod +paddle.fluid.layers.multi_box_head paddle.static.nn.multi_box_head +paddle.fluid.layers.conv2d paddle.static.nn.conv2d +paddle.fluid.layers.collect_fpn_proposals paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals +paddle.fluid.initializer.TruncatedNormal paddle.nn.initializer.TruncatedNormal +paddle.nn.functional.extension.diag_embed paddle.nn.functional.diag_embed +paddle.tensor.creation.zeros paddle.zeros,paddle.tensor.zeros +paddle.fluid.layers.warpctc paddle.nn.functional.warpctc,paddle.nn.functional.extension.warpctc +paddle.fluid.layers.reciprocal paddle.reciprocal,paddle.tensor.reciprocal,paddle.tensor.math.reciprocal +paddle.fluid.layers.logical_xor paddle.logical_xor,paddle.tensor.logical_xor,paddle.tensor.logic.logical_xor +paddle.fluid.layers.l2_normalize paddle.nn.functional.l2_normalize,paddle.nn.functional.norm.l2_normalize +paddle.tensor.math.maximum paddle.maximum,paddle.tensor.maximum +paddle.fluid.layers.generate_proposals paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals +paddle.tensor.logic.equal_all paddle.equal_all,paddle.tensor.equal_all +paddle.nn.layer.loss.NLLLoss paddle.nn.NLLLoss,paddle.nn.layer.NLLLoss +paddle.fluid.layers.retinanet_target_assign paddle.nn.functional.retinanet_target_assign,paddle.nn.functional.vision.retinanet_target_assign +paddle.fluid.layers.prelu paddle.static.nn.prelu +paddle.fluid.layers.tensor.create_global_var 
paddle.create_global_var,paddle.framework.create_global_var +paddle.fluid.layers.box_decoder_and_assign paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign +paddle.tensor.math.div paddle.div,paddle.tensor.div +paddle.tensor.math.addcmul paddle.addcmul,paddle.tensor.addcmul +paddle.fluid.layers.floor paddle.floor,paddle.tensor.floor,paddle.tensor.math.floor +paddle.framework.__init__.paddle.fluid.core paddle.framework.core +paddle.fluid.parallel_executor.ParallelExecutor paddle.static.ParallelExecutor +paddle.fluid.layers.spectral_norm paddle.static.nn.spectral_norm +paddle.fluid.layers.scatter_nd_add paddle.scatter_nd_add,paddle.tensor.scatter_nd_add,paddle.tensor.manipulation.scatter_nd_add +paddle.fluid.layers.lrn paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn +paddle.fluid.layers.instance_norm paddle.static.nn.instance_norm +paddle.fluid.layers.hash paddle.nn.functional.hash,paddle.nn.functional.lod.hash +paddle.fluid.layers.elementwise_add paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add +paddle.fluid.layers.create_tensor paddle.create_tensor,paddle.tensor.create_tensor,paddle.tensor.creation.create_tensor +paddle.fluid.layers.affine_grid paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid +paddle.fluid.layers.adaptive_pool3d paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d +paddle.fluid.framework.default_startup_program paddle.static.default_startup_program +paddle.fluid.clip.GradientClipByValue paddle.nn.GradientClipByValue,paddle.nn.clip.GradientClipByValue +paddle.framework.random.manual_seed paddle.manual_seed +paddle.fluid.layers.smooth_l1 paddle.nn.functional.smooth_l1,paddle.nn.functional.loss.smooth_l1 +paddle.fluid.layers.multiplex paddle.multiplex,paddle.tensor.multiplex,paddle.tensor.math.multiplex +paddle.fluid.layers.logical_and paddle.logical_and,paddle.tensor.logical_and,paddle.tensor.logic.logical_and 
+paddle.fluid.layers.clip_by_norm paddle.nn.clip_by_norm,paddle.nn.clip.clip_by_norm +paddle.fluid.layers.asin paddle.asin,paddle.tensor.asin,paddle.tensor.math.asin +paddle.fluid.layers.adaptive_pool2d paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d +paddle.tensor.manipulation.flip paddle.flip,paddle.reverse,paddle.tensor.flip,paddle.tensor.reverse +paddle.tensor.logic.allclose paddle.allclose,paddle.tensor.allclose +paddle.fluid.layers.softplus paddle.nn.functional.softplus,paddle.nn.functional.activation.softplus +paddle.fluid.layers.scatter_nd paddle.scatter_nd,paddle.tensor.scatter_nd,paddle.tensor.manipulation.scatter_nd +paddle.fluid.layers.has_inf paddle.has_inf,paddle.tensor.has_inf,paddle.tensor.search.has_inf +paddle.fluid.layers.crop_tensor paddle.crop_tensor,paddle.tensor.crop_tensor,paddle.tensor.creation.crop_tensor +paddle.fluid.layers.box_coder paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder +paddle.fluid.layers.bilinear_tensor_product paddle.static.nn.bilinear_tensor_product +paddle.fluid.initializer.Normal paddle.nn.initializer.Normal +paddle.fluid.dygraph.jit.SaveLoadConfig paddle.jit.SaveLoadConfig \ No newline at end of file diff --git a/doc/paddle/api/gen_doc.py b/doc/paddle/api/gen_doc.py index e4439535a..ecd658c12 100644 --- a/doc/paddle/api/gen_doc.py +++ b/doc/paddle/api/gen_doc.py @@ -68,8 +68,8 @@ def get_alias_mapping(file="./alias_api_mapping"): for line in f.readlines(): t = line.strip().split('\t') real_api = t[0].strip() - alias_api = t[1].strip() - alias_api_map[real_api] = alias_api + alias_apis = t[1].strip().split(',') + alias_api_map[real_api] = alias_apis def is_filter_api(api): @@ -77,7 +77,7 @@ def is_filter_api(api): if api in display_doc_map: return False - #check in api in not_display_list + #check api in not_display_list for key in not_display_doc_map: #find the api if key == api: @@ -127,6 +127,14 @@ def is_filter_api(api): return False +def get_display_api(api): + 
# recomment alias api + if api.startswith("paddle.fluid") and alias_api_map.has_key(api): + return alias_api_map[api][0] + else: + return api + + def gen_en_files(root_path='paddle'): backup_path = root_path + "_" + str(int(time.time())) @@ -134,11 +142,15 @@ def gen_en_files(root_path='paddle'): if is_filter_api(api): continue + api = get_display_api(api) + doc_file = api.split(".")[-1] path = "/".join(api.split(".")[0:-1]) if not os.path.exists(path): os.makedirs(path) f = api.replace(".", "/") + if os.path.exists(f + en_suffix): + continue os.mknod(f + en_suffix) gen = EnDocGenerator() with gen.guard(f + en_suffix): diff --git a/doc/paddle/api/paddle/framework/CosineDecay_cn.rst b/doc/paddle/api/paddle/CosineDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/CosineDecay_cn.rst rename to doc/paddle/api/paddle/CosineDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/ExponentialDecay_cn.rst b/doc/paddle/api/paddle/ExponentialDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/ExponentialDecay_cn.rst rename to doc/paddle/api/paddle/ExponentialDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/InverseTimeDecay_cn.rst b/doc/paddle/api/paddle/InverseTimeDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/InverseTimeDecay_cn.rst rename to doc/paddle/api/paddle/InverseTimeDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/NaturalExpDecay_cn.rst b/doc/paddle/api/paddle/NaturalExpDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/NaturalExpDecay_cn.rst rename to doc/paddle/api/paddle/NaturalExpDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/NoamDecay_cn.rst b/doc/paddle/api/paddle/NoamDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/NoamDecay_cn.rst rename to doc/paddle/api/paddle/NoamDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/ParallelEnv_cn.rst b/doc/paddle/api/paddle/ParallelEnv_cn.rst 
similarity index 100% rename from doc/paddle/api/paddle/framework/ParallelEnv_cn.rst rename to doc/paddle/api/paddle/ParallelEnv_cn.rst diff --git a/doc/paddle/api/paddle/framework/ParamAttr_cn.rst b/doc/paddle/api/paddle/ParamAttr_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/ParamAttr_cn.rst rename to doc/paddle/api/paddle/ParamAttr_cn.rst diff --git a/doc/paddle/api/paddle/framework/PiecewiseDecay_cn.rst b/doc/paddle/api/paddle/PiecewiseDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/PiecewiseDecay_cn.rst rename to doc/paddle/api/paddle/PiecewiseDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/PolynomialDecay_cn.rst b/doc/paddle/api/paddle/PolynomialDecay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/PolynomialDecay_cn.rst rename to doc/paddle/api/paddle/PolynomialDecay_cn.rst diff --git a/doc/paddle/api/paddle/framework/Variable_cn.rst b/doc/paddle/api/paddle/Variable_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/Variable_cn.rst rename to doc/paddle/api/paddle/Variable_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/abs_cn.rst b/doc/paddle/api/paddle/abs_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/abs_cn.rst rename to doc/paddle/api/paddle/abs_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/acos_cn.rst b/doc/paddle/api/paddle/acos_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/acos_cn.rst rename to doc/paddle/api/paddle/acos_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/argmin_cn.rst b/doc/paddle/api/paddle/argmin_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/argmin_cn.rst rename to doc/paddle/api/paddle/argmin_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/asin_cn.rst b/doc/paddle/api/paddle/asin_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/asin_cn.rst rename to 
doc/paddle/api/paddle/asin_cn.rst diff --git a/doc/paddle/api/paddle/atan_cn.rst b/doc/paddle/api/paddle/atan_cn.rst new file mode 100644 index 000000000..1c36f1047 --- /dev/null +++ b/doc/paddle/api/paddle/atan_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_atan: + +atan +------------------------------- + +.. py:function:: paddle.fluid.layers.atan(x, name=None) + +:alias_main: paddle.atan +:alias: paddle.atan,paddle.tensor.atan,paddle.tensor.math.atan +:update_api: paddle.fluid.layers.atan + + + +arctanh激活函数。 + +.. math:: + out = tanh^{-1}(x) + +参数: + - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + +返回: `atan` 的输出Tensor,数据类型与 `x` 相同。 + +返回类型: Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[4]) + # if data is [-0.8183, 0.4912, -0.6444, 0.0371] + result = fluid.layers.atan(data) + # result is [-0.6858, 0.4566, -0.5724, 0.0371] + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/cast_cn.rst b/doc/paddle/api/paddle/cast_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/cast_cn.rst rename to doc/paddle/api/paddle/cast_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/ceil_cn.rst b/doc/paddle/api/paddle/ceil_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/ceil_cn.rst rename to doc/paddle/api/paddle/ceil_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/cos_cn.rst b/doc/paddle/api/paddle/cos_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/cos_cn.rst rename to doc/paddle/api/paddle/cos_cn.rst diff --git a/doc/paddle/api/paddle/framework/create_global_var_cn.rst b/doc/paddle/api/paddle/create_global_var_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/create_global_var_cn.rst rename to doc/paddle/api/paddle/create_global_var_cn.rst diff --git 
a/doc/paddle/api/paddle/framework/create_parameter_cn.rst b/doc/paddle/api/paddle/create_parameter_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/create_parameter_cn.rst rename to doc/paddle/api/paddle/create_parameter_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/create_tensor_cn.rst b/doc/paddle/api/paddle/create_tensor_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/create_tensor_cn.rst rename to doc/paddle/api/paddle/create_tensor_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/crop_tensor_cn.rst b/doc/paddle/api/paddle/crop_tensor_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/crop_tensor_cn.rst rename to doc/paddle/api/paddle/crop_tensor_cn.rst diff --git a/doc/paddle/api/paddle/dataset/common/split_cn.rst b/doc/paddle/api/paddle/dataset/common/split_cn.rst index 52f85fccc..ca1607ee5 100644 --- a/doc/paddle/api/paddle/dataset/common/split_cn.rst +++ b/doc/paddle/api/paddle/dataset/common/split_cn.rst @@ -1,56 +1,65 @@ -.. _cn_api_paddle_tensor_split +.. _cn_api_fluid_layers_split: + split ------------------------------- -.. py:function:: paddle.tensor.split(x, num_or_sections, axis=0, name=None) +.. 
py:function:: paddle.fluid.layers.split(input, num_or_sections, dim=-1, name=None) + 该OP将输入Tensor分割成多个子Tensor。 -**参数**: - - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 - - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。在list或tuple中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4,6,6]Tensor的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]`` ,输出的三个Tensor维度分别为:[4,6,2],[4,6,3],[4,6,1]。 - - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 +参数: + - **input** (Tensor) - 输入变量,数据类型为bool, float16,float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。至多有一个元素值为-1,-1表示该值是由 ``input`` 待分割的维度值和 ``num_or_sections`` 的剩余元素推断出来的。 + - **dim** (int|Tenspr,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``dim < 0`` ,则划分的维度为 ``rank(input) + dim`` 。默认值为-1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:分割后的Tensor列表。 + 抛出异常: - - :code:`TypeError`:``x`` 的数据类型不是float16、float32、float64、int32或int64时 。 + - :code:`TypeError`:``input`` 的数据类型不是bool、float16、float32、float64、int32或int64时 。 - :code:`TypeError`:``num_or_sections`` 不是int、list 或 tuple时。 - - :code:`TypeError`:``axis`` 不是 int 或 Tensor时。当 ``axis`` 为Tensor,其数据类型不是int32或int64时。 + - :code:`TypeError`:``dim`` 不是 int 或 Tensor时。当 ``dim`` 为Tensor,其数据类型不是int32或int64时。 **代码示例**: .. 
code-block:: python - import numpy as np - import paddle - - paddle.enable_imperative() - # x is a Tensor which shape is [3, 9, 5] - x_np = np.random.random([3, 9, 5]).astype("int32") - x = paddle.imperative.to_variable(x_np) + import paddle.fluid as fluid - out0, out1, out22 = paddle.split(x, num_or_sections=3, axis=1) + # input is a Tensor which shape is [3, 9, 5] + input = fluid.data( + name="input", shape=[3, 9, 5], dtype="float32") + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1) # out0.shape [3, 3, 5] # out1.shape [3, 3, 5] # out2.shape [3, 3, 5] - out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1) # out0.shape [3, 2, 5] # out1.shape [3, 3, 5] # out2.shape [3, 4, 5] - out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1) # out0.shape [3, 2, 5] # out1.shape [3, 3, 5] # out2.shape [3, 4, 5] - # axis is negative, the real axis is (rank(x) + axis) which real + # dim is negative, the real dim is (rank(input) + axis) which real # value is 1. 
- out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2) # out0.shape [3, 3, 5] # out1.shape [3, 3, 5] # out2.shape [3, 3, 5] + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/diag_cn.rst b/doc/paddle/api/paddle/diag_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/diag_cn.rst rename to doc/paddle/api/paddle/diag_cn.rst diff --git a/doc/paddle/api/paddle/fleet/DatasetFactory_cn.rst b/doc/paddle/api/paddle/distributed/fleet/DatasetFactory_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fleet/DatasetFactory_cn.rst rename to doc/paddle/api/paddle/distributed/fleet/DatasetFactory_cn.rst diff --git a/doc/paddle/api/paddle/fleet/InMemoryDataset_cn.rst b/doc/paddle/api/paddle/distributed/fleet/InMemoryDataset_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fleet/InMemoryDataset_cn.rst rename to doc/paddle/api/paddle/distributed/fleet/InMemoryDataset_cn.rst diff --git a/doc/paddle/api/paddle/fleet/QueueDataset_cn.rst b/doc/paddle/api/paddle/distributed/fleet/QueueDataset_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fleet/QueueDataset_cn.rst rename to doc/paddle/api/paddle/distributed/fleet/QueueDataset_cn.rst diff --git a/doc/paddle/api/paddle/fleet/meta_optimizers/LambOptimizer_cn.rst b/doc/paddle/api/paddle/distributed/fleet/meta_optimizers/LambOptimizer_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fleet/meta_optimizers/LambOptimizer_cn.rst rename to doc/paddle/api/paddle/distributed/fleet/meta_optimizers/LambOptimizer_cn.rst diff --git a/doc/paddle/api/paddle/fleet/meta_optimizers/RecomputeOptimizer_cn.rst b/doc/paddle/api/paddle/distributed/fleet/meta_optimizers/RecomputeOptimizer_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fleet/meta_optimizers/RecomputeOptimizer_cn.rst rename to doc/paddle/api/paddle/distributed/fleet/meta_optimizers/RecomputeOptimizer_cn.rst 
diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_add_cn.rst b/doc/paddle/api/paddle/elementwise_add_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_add_cn.rst rename to doc/paddle/api/paddle/elementwise_add_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_div_cn.rst b/doc/paddle/api/paddle/elementwise_div_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_div_cn.rst rename to doc/paddle/api/paddle/elementwise_div_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_floordiv_cn.rst b/doc/paddle/api/paddle/elementwise_floordiv_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_floordiv_cn.rst rename to doc/paddle/api/paddle/elementwise_floordiv_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_mod_cn.rst b/doc/paddle/api/paddle/elementwise_mod_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_mod_cn.rst rename to doc/paddle/api/paddle/elementwise_mod_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_pow_cn.rst b/doc/paddle/api/paddle/elementwise_pow_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_pow_cn.rst rename to doc/paddle/api/paddle/elementwise_pow_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_sub_cn.rst b/doc/paddle/api/paddle/elementwise_sub_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elementwise_sub_cn.rst rename to doc/paddle/api/paddle/elementwise_sub_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/erf_cn.rst b/doc/paddle/api/paddle/erf_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/erf_cn.rst rename to doc/paddle/api/paddle/erf_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/exp_cn.rst b/doc/paddle/api/paddle/exp_cn.rst similarity index 100% rename from 
doc/paddle/api/paddle/fluid/layers/exp_cn.rst rename to doc/paddle/api/paddle/exp_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/expand_as_cn.rst b/doc/paddle/api/paddle/expand_as_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/expand_as_cn.rst rename to doc/paddle/api/paddle/expand_as_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/expand_cn.rst b/doc/paddle/api/paddle/expand_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/expand_cn.rst rename to doc/paddle/api/paddle/expand_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/fill_constant_cn.rst b/doc/paddle/api/paddle/fill_constant_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/fill_constant_cn.rst rename to doc/paddle/api/paddle/fill_constant_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/floor_cn.rst b/doc/paddle/api/paddle/floor_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/floor_cn.rst rename to doc/paddle/api/paddle/floor_cn.rst diff --git a/doc/paddle/api/paddle/fluid/BuildStrategy_cn.rst b/doc/paddle/api/paddle/fluid/BuildStrategy_cn.rst deleted file mode 100644 index 2d6f2fa05..000000000 --- a/doc/paddle/api/paddle/fluid/BuildStrategy_cn.rst +++ /dev/null @@ -1,198 +0,0 @@ -.. _cn_api_fluid_BuildStrategy: - -BuildStrategy -------------------------------- - - -.. py:class:: paddle.fluid.BuildStrategy - -:api_attr: 声明式编程模式(静态图) - - - -``BuildStrategy`` 使用户更方便地控制 :ref:`cn_api_fluid_ParallelExecutor` 中计算图的建造方法,可通过设置 ``ParallelExecutor`` 中的 ``BuildStrategy`` 成员来实现此功能。 - -**代码示例** - -.. 
code-block:: python - - import os - import numpy as np - import paddle.fluid as fluid - - os.environ["CPU_NUM"] = '2' - places = fluid.cpu_places() - - data = fluid.layers.data(name="x", shape=[1], dtype="float32") - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - build_strategy = fluid.BuildStrategy() - build_strategy.enable_inplace = True - build_strategy.memory_optimize = True - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - program = fluid.compiler.CompiledProgram(fluid.default_main_program()) - program = program.with_data_parallel(loss_name=loss.name, - build_strategy=build_strategy, - places=places) - - -.. py:attribute:: debug_graphviz_path - -str类型。表示以graphviz格式向文件中写入计算图的路径,有利于调试。默认值为空字符串。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.debug_graphviz_path = "./graph" - - -.. py:attribute:: enable_sequential_execution - -bool类型。如果设置为True,则算子的执行顺序将与算子定义的执行顺序相同。默认为False。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.enable_sequential_execution = True - - -.. py:attribute:: fuse_broadcast_ops - -bool类型。表明是否融合(fuse) broadcast ops。该选项指在Reduce模式下有效,使程序运行更快。默认为False。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.fuse_broadcast_ops = True - - -.. py:attribute:: fuse_elewise_add_act_ops - -bool类型。表明是否融合(fuse) elementwise_add_op和activation_op。这会使整体执行过程更快。默认为False。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.fuse_elewise_add_act_ops = True - - -.. py:attribute:: fuse_relu_depthwise_conv - -bool类型。表明是否融合(fuse) relu和depthwise_conv2d,节省GPU内存并可能加速执行过程。此选项仅适用于GPU设备。默认为False。 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.fuse_relu_depthwise_conv = True - -.. py:attribute:: gradient_scale_strategy - -``fluid.BuildStrategy.GradientScaleStrategy`` 类型。在 ``ParallelExecutor`` 中,存在三种定义loss对应梯度( *loss@grad* )的方式,分别为 ``CoeffNumDevice``, ``One`` 与 ``Customized``。默认情况下, ``ParallelExecutor`` 根据设备数目来设置 *loss@grad* 。如果用户需要自定义 *loss@grad* ,可以选择 ``Customized`` 方法。默认为 ``CoeffNumDevice`` 。 - -**代码示例** - -.. code-block:: python - - import os - import numpy as np - import paddle.fluid as fluid - import paddle.fluid.compiler as compiler - - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - - # NOTE: 如果你使用CPU计算,需要指定CPU_NUM, 否则,fluid - # 将使用所有的核的数目作为CPU_NUM, - # 这种情况下,输入的batch size应该大于CPU_NUM, 否则, - # 进程将会因为异常而失败。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - places = fluid.cpu_places() - else: - places = places = fluid.cuda_places() - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - fluid.default_startup_program().random_seed=1 - exe.run(fluid.default_startup_program()) - - build_strategy = fluid.BuildStrategy() - build_strategy.gradient_scale_strategy = \ - fluid.BuildStrategy.GradientScaleStrategy.Customized - compiled_prog = compiler.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy, - places = places) - - dev_count = len(places) - x = np.random.random(size=(10, 1)).astype('float32') - loss_grad = np.ones((dev_count)).astype("float32") * 0.01 - loss_grad_name = loss.name+"@GRAD" - loss_data = exe.run(compiled_prog, - feed={"X": x, loss_grad_name : loss_grad}, - fetch_list=[loss.name, loss_grad_name]) - -.. 
py:attribute:: memory_optimize - -bool类型或None。设为True时可用于减少总内存消耗,False表示不使用,None表示框架会自动选择使用或者不使用优化策略。当前,None意味着当GC不能使用时,优化策略将被使用。默认为None。 - -.. py:attribute:: reduce_strategy - -``fluid.BuildStrategy.ReduceStrategy`` 类型。在 ``ParallelExecutor`` 中,存在两种参数梯度聚合策略,即 ``AllReduce`` 和 ``Reduce`` 。如果用户需要在所有执行设备上独立地进行参数更新,可以使用 ``AllReduce`` 。如果使用 ``Reduce`` 策略,所有参数的优化将均匀地分配给不同的执行设备,随之将优化后的参数广播给其他执行设备。 -默认值为 ``AllReduce`` 。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - -.. py:attribute:: remove_unnecessary_lock - -bool类型。设置True会去除GPU操作中的一些锁操作, ``ParallelExecutor`` 将运行得更快,默认为True。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.remove_unnecessary_lock = True - - -.. py:attribute:: sync_batch_norm - -bool类型。表示是否使用同步的批正则化,即在训练阶段通过多个设备同步均值和方差。当前的实现不支持FP16训练和CPU。并且目前**仅支持**仅在一台机器上进行同步式批正则。默认为 False。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - build_strategy = fluid.BuildStrategy() - build_strategy.sync_batch_norm = True - - diff --git a/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst b/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst new file mode 100644 index 000000000..e091352c9 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CPUPlace_cn.rst @@ -0,0 +1,20 @@ +.. _cn_api_fluid_CPUPlace: + +CPUPlace +------------------------------- + +.. py:class:: paddle.fluid.CPUPlace + + + + +``CPUPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 ``CPU`` 设备。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + cpu_place = fluid.CPUPlace() + + diff --git a/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst b/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst new file mode 100644 index 000000000..a3e669344 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CUDAPinnedPlace_cn.rst @@ -0,0 +1,20 @@ +.. 
_cn_api_fluid_CUDAPinnedPlace: + +CUDAPinnedPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPinnedPlace + + + + +``CUDAPinnedPlace`` 是一个设备描述符,它所指代的页锁定内存由 CUDA 函数 ``cudaHostAlloc()`` 在主机内存上分配,主机的操作系统将不会对这块内存进行分页和交换操作,可以通过直接内存访问技术访问,加速主机和 GPU 之间的数据拷贝。 +有关 CUDA 的数据转移和 ``pinned memory``,参见 `官方文档 `_ 。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + place = fluid.CUDAPinnedPlace() + diff --git a/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst b/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst new file mode 100644 index 000000000..ba7cf6228 --- /dev/null +++ b/doc/paddle/api/paddle/fluid/CUDAPlace_cn.rst @@ -0,0 +1,33 @@ +.. _cn_api_fluid_CUDAPlace: + +CUDAPlace +------------------------------- + +.. py:class:: paddle.fluid.CUDAPlace + + + + +.. note:: + 多卡任务请先使用 FLAGS_selected_gpus 环境变量设置可见的GPU设备,下个版本将会修正 CUDA_VISIBLE_DEVICES 环境变量无效的问题。 + +``CUDAPlace`` 是一个设备描述符,表示一个分配或将要分配 ``Tensor`` 或 ``LoDTensor`` 的 GPU 设备。 +每个 ``CUDAPlace`` 有一个 ``dev_id`` (设备id)来表明当前的 ``CUDAPlace`` 所代表的显卡编号,编号从 0 开始。 +``dev_id`` 不同的 ``CUDAPlace`` 所对应的内存不可相互访问。 +这里编号指的是可见显卡的逻辑编号,而不是显卡实际的编号。 +可以通过 ``CUDA_VISIBLE_DEVICES`` 环境变量限制程序能够使用的 GPU 设备,程序启动时会遍历当前的可见设备,并从 0 开始为这些设备编号。 +如果没有设置 ``CUDA_VISIBLE_DEVICES``,则默认所有的设备都是可见的,此时逻辑编号与实际编号是相同的。 + +参数: + - **id** (int,可选) - GPU的设备ID。如果为 ``None``,则默认会使用 id 为 0 的设备。默认值为 ``None``。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + gpu_place = fluid.CUDAPlace(0) + + + + diff --git a/doc/paddle/api/paddle/fluid/CompiledProgram_cn.rst b/doc/paddle/api/paddle/fluid/CompiledProgram_cn.rst deleted file mode 100644 index c6576c634..000000000 --- a/doc/paddle/api/paddle/fluid/CompiledProgram_cn.rst +++ /dev/null @@ -1,114 +0,0 @@ -.. _cn_api_fluid_CompiledProgram: - -CompiledProgram -------------------------------- - - -.. 
py:class:: paddle.fluid.CompiledProgram(program_or_graph, build_strategy=None) - -:api_attr: 声明式编程模式(静态图) - - - -CompiledProgram根据 `build_strategy` 的配置将输入的Program或Graph进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等,关于build_strategy更多信息。请参阅 ``fluid.BuildStrategy`` 。 - -参数: - - **program_or_graph** (Graph|Program): 该参数为被执行的Program或Graph。 - - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多信息,请参阅 ``fluid.BuildStrategy`` 。 默认为None。 - -返回:初始化后的 ``CompiledProgram`` 对象 - -返回类型:CompiledProgram - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - place = fluid.CUDAPlace(0) # fluid.CPUPlace() - exe = fluid.Executor(place) - - data = fluid.data(name='X', shape=[None, 1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(fluid.default_startup_program()) - compiled_prog = fluid.CompiledProgram( - fluid.default_main_program()) - - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = exe.run(compiled_prog, - feed={"X": x}, - fetch_list=[loss.name]) - - -.. 
py:method:: with_data_parallel(loss_name=None, build_strategy=None, exec_strategy=None, share_vars_from=None, places=None) - -该接口用于将输入的Program或Graph进行转换,以便通过数据并行模式运行该模型。用户可以通过 `build_strategy` 和 `exec_strategy` 设置计算图构建和计算图执行过程中可以进行的一些优化,例如:将梯度聚合的AllReduce操作进行融合、指定计算图运行过程中使用的线程池大小等。**注意:如果在构建CompiledProgram和调用with_data_parallel时都指定了build_strategy,在CompiledProgram中的build_strategy会被复写,因此,如果是数据并行训练,建议在调用with_data_parallel接口时设置build_strategy**。 - -参数: - - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字,**注意:如果是模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 - - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``fluid.BuildStrategy`` 。 默认为:None。 - - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。 关于exec_strategy更多信息,请参阅 ``fluid.ExecutionStrategy`` 。 默认为:None。 - - **share_vars_from** (CompiledProgram) - 如果设置了share_vars_from,当前的CompiledProgram将与share_vars_from指定的CompiledProgram共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的CompiledProgram在调用with_data_parallel时,需要将share_vars_from设置为训练对应的CompiledProgram。由于CompiledProgram只有在第一次执行时才会将变量分发到其他设备上,因此share_vars_from指定的CompiledProgram必须在当前CompiledProgram之前运行。默认为:None。 - - **places** (list(CUDAPlace)|list(CPUPlace)) - 该参数指定模型运行所在的设备。如果希望在GPU0和GPU1上运行,places为[fluid.CUDAPlace(0), fluid.CUDAPlace(1)];如果希望使用2个CPU运行,places为[fluid.CPUPlace()] * 2。 如果没有设置该参数,即该参数为None,模型执行时,将从环境变量中获取可用的设备:如果使用GPU,模型执行时,从环境变量FLAGS_selected_gpus或CUDA_VISIBLE_DEVICES中获取当前可用的设备ID;如果使用CPU,模型执行时,从环境变量CPU_NUM中获取当前可利用的CPU个数。例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1。默认为:None。 - -返回:配置之后的 ``CompiledProgram`` 对象 - -返回类型:CompiledProgram - -.. note:: - 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 - 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的CompiledProgram时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 - - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy - import os - - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - parallel_places = [fluid.CUDAPlace(0), fluid.CUDAPlace(1)] if use_cuda else [fluid.CPUPlace()] * 2 - - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则fluid会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - exe = fluid.Executor(place) - - data = fluid.data(name='X', shape=[None, 1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - - test_program = fluid.default_main_program().clone(for_test=True) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(fluid.default_startup_program()) - compiled_train_prog = fluid.CompiledProgram( - fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, places=parallel_places) - # 注意:如果此处不设置share_vars_from=compiled_train_prog, - # 测试过程中用的参数与训练使用的参数是不一致 - compiled_test_prog = fluid.CompiledProgram( - test_program).with_data_parallel( - share_vars_from=compiled_train_prog, - places=parallel_places) - - train_data = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = exe.run(compiled_train_prog, - feed={"X": train_data}, - fetch_list=[loss.name]) - test_data = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = exe.run(compiled_test_prog, - feed={"X": test_data}, - fetch_list=[loss.name]) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/ExecutionStrategy_cn.rst b/doc/paddle/api/paddle/fluid/ExecutionStrategy_cn.rst deleted file mode 100644 index 25b623fbf..000000000 --- a/doc/paddle/api/paddle/fluid/ExecutionStrategy_cn.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. _cn_api_fluid_ExecutionStrategy: - -ExecutionStrategy -------------------------------- - - -.. 
py:class:: paddle.fluid.ExecutionStrategy - -:api_attr: 声明式编程模式(静态图) - - - -通过设置 ``ExecutionStrategy`` 中的选项,用户可以对执行器的执行配置进行调整,比如设置执行器中线程池的大小等。 - -返回:初始化后的ExecutionStrategy的实例 - -返回类型:ExecutionStrategy - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name='x', shape=[13], dtype='float32') - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - y_predict = fluid.layers.fc(input=x, size=1, act=None) - - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_loss = fluid.layers.mean(cost) - - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - sgd_optimizer.minimize(avg_loss) - - exec_strategy = fluid.ExecutionStrategy() - exec_strategy.num_threads = 4 - - train_exe = fluid.ParallelExecutor(use_cuda=False, - loss_name=avg_loss.name, - exec_strategy=exec_strategy) - - -.. py:attribute:: num_iteration_per_drop_scope - -int型成员。该选项表示间隔多少次迭代之后清理一次临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,通常将其设为较大的值(比如10或者100)。默认值为100。 - - -.. py:attribute:: num_iteration_per_run - -int型成员。它配置了当用户在python脚本中调用pe.run()时执行器会执行的迭代次数。Executor每次调用,会进行num_iteration_per_run次训练,它会使整体执行过程更快。 - -.. py:attribute:: num_threads - -int型成员。该选项表示当前 ``Executor`` 的线程池(thread pool)的大小, 此线程池可用来并发执行program中的operator(算子,运算)。如果 :math:`num\_threads=1` ,则所有的operator将一个接一个地执行,但在不同的program重复周期(iterations)中执行顺序可能不同。如果该选项没有被设置,则在 ``Executor`` 中,它会依据设备类型(device type)、设备数目(device count)而设置为相应值。对GPU,:math:`num\_threads=device\_count∗4` ;对CPU, :math:`num\_threads=CPU\_NUM∗4` 。在 ``Executor`` 中有关于 :math:`CPU\_NUM` 的详细解释。如果没有设置 :math:`CPU\_NUM` ,则设置默认值为1, 并提示用户进行 :math:`CPU\_NUM` 的设置。 - - - - - - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/Executor_cn.rst b/doc/paddle/api/paddle/fluid/Executor_cn.rst deleted file mode 100644 index 7a2053a89..000000000 --- a/doc/paddle/api/paddle/fluid/Executor_cn.rst +++ /dev/null @@ -1,272 +0,0 @@ -.. _cn_api_fluid_executor: - -Executor -------------------------------- - - - -.. 
py:class:: paddle.fluid.Executor (place=None) - -:api_attr: 声明式编程模式(静态图) - - - -Executor支持单GPU、多GPU以及CPU运行。 - -参数: - - **place** (fluid.CPUPlace()|fluid.CUDAPlace(N)|None) – 该参数表示Executor执行所在的设备,这里的N为GPU对应的ID。当该参数为 `None` 时,PaddlePaddle会根据其安装版本设置默认的运行设备。当安装的Paddle为CPU版时,默认运行设置会设置成 `CPUPlace()` ,而当Paddle为GPU版时,默认运行设备会设置成 `CUDAPlace(0)` 。默认值为None。 - -返回:初始化后的 ``Executor`` 对象 - -返回类型:Executor - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.compiler as compiler - import numpy - import os - - # 显式设置运行设备 - # use_cuda = True - # place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - # exe = fluid.Executor(place) - - # 如果不显示设置运行设备,PaddlePaddle会设置默认运行设备 - exe = fluid.Executor() - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - # 仅运行一次startup program - # 不需要优化/编译这个startup program - startup_program.random_seed=1 - exe.run(startup_program) - - # 无需编译,直接运行main program - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = exe.run(train_program, - feed={"X": x}, - fetch_list=[loss.name]) - - # 另一种方法是,编译这个main program然后运行。 - # 参考CompiledProgram以获取更多信息。 - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则fluid会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, - # 否则程序会异常中断。 - - # 显式设置运行设备 - # if not use_cuda: - # os.environ['CPU_NUM'] = str(2) - - # 未显示设置运行设备且安装的Paddle为CPU版本 - os.environ['CPU_NUM'] = str(2) - - compiled_prog = compiler.CompiledProgram( - train_program).with_data_parallel( - loss_name=loss.name) - loss_data, = exe.run(compiled_prog, - feed={"X": x}, - fetch_list=[loss.name]) - -.. py:method:: close() - - -关闭执行器。该接口主要用于对于分布式训练,调用该接口后不可以再使用该执行器。该接口会释放在PServers上和目前Trainer有关联的资源。 - -返回:无 - -**示例代码** - -.. 
code-block:: python - - import paddle.fluid as fluid - - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) - # 执行训练或测试过程 - exe.close() - - -.. py:method:: run(program=None, feed=None, fetch_list=None, feed_var_name='feed', fetch_var_name='fetch', scope=None, return_numpy=True, use_program_cache=False, return_merged=True) - -执行指定的Program或者CompiledProgram。需要注意的是,执行器会执行Program或CompiledProgram中的所有算子,而不会根据fetch_list对Program或CompiledProgram中的算子进行裁剪。同时,需要传入运行该模型用到的scope,如果没有指定scope,执行器将使用全局scope,即fluid.global_scope()。 - -参数: - - **program** (Program|CompiledProgram) – 该参数为被执行的Program或CompiledProgram,如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为fluid.default_main_program()。默认为:None。 - - **feed** (list|dict) – 该参数表示模型的输入变量。如果是单卡训练,``feed`` 为 ``dict`` 类型,如果是多卡训练,参数 ``feed`` 可以是 ``dict`` 或者 ``list`` 类型变量,如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU),即输入数据被均匀分配到不同设备上;如果该参数类型为 ``list`` ,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 - - **fetch_list** (list) – 该参数表示模型运行之后需要返回的变量。默认为:None。 - - **feed_var_name** (str) – 该参数表示数据输入算子(feed operator)的输入变量名称。默认为:"feed"。 - - **fetch_var_name** (str) – 该参数表示结果获取算子(fetch operator)的输出变量名称。默认为:"fetch"。 - - **scope** (Scope) – 该参数表示执行当前program所使用的作用域,用户可以为不同的program指定不同的作用域。默认值:fluid.global_scope()。 - - **return_numpy** (bool) – 该参数表示是否将返回的计算结果(fetch list中指定的变量)转化为numpy;如果为False,则每个变量返回的类型为LoDTensor,否则返回变量的类型为numpy.ndarray。默认为:True。 - - **use_program_cache** (bool) – 该参数表示是否对输入的Program进行缓存。如果该参数为True,在以下情况时,模型运行速度可能会更快:输入的program为 ``fluid.Program`` ,并且模型运行过程中,调用该接口的参数(program、 feed变量名和fetch_list变量)名始终不变。默认为:False。 - - **return_merged** (bool) – 该参数表示是否按照执行设备维度将返回的计算结果(fetch list中指定的变量)进行合并。如果 ``return_merged`` 设为False,返回值类型是一个Tensor的二维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的二维列表( ``return_numpy`` 设为True时)。如果 ``return_merged`` 设为True,返回值类型是一个Tensor的一维列表( ``return_numpy`` 设为Fasle时)或者一个numpy.ndarray的一维列表( ``return_numpy`` 设为True时)。更多细节请参考示例代码2。如果返回的计算结果是变长的,请设置 ``return_merged`` 
为False,即不按照执行设备维度合并返回的计算结果。该参数的默认值为True,但这仅是为了兼容性考虑,在未来的版本中默认值可能会更改为False。 - -返回:返回fetch_list中指定的变量值 - -返回类型:List - -.. note:: - 1. 如果是多卡训练,并且feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 - 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 - - -**示例代码1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - #首先创建执行引擎 - place = fluid.CPUPlace() # fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - adam = fluid.optimizer.Adam() - adam.minimize(loss) - - #仅运行startup程序一次 - exe.run(fluid.default_startup_program()) - - x = numpy.random.random(size=(10, 1)).astype('float32') - outs = exe.run(feed={'X': x}, - fetch_list=[loss.name]) - - -**示例代码2** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - # 创建Executor对象 - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - data = fluid.data(name='X', shape=[None, 1], dtype='float32') - class_dim = 2 - prediction = fluid.layers.fc(input=data, size=class_dim) - loss = fluid.layers.mean(prediction) - adam = fluid.optimizer.Adam() - adam.minimize(loss) - # 运行且仅运行一次startup program - exe.run(fluid.default_startup_program()) - build_strategy = fluid.BuildStrategy() - binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel( - loss_name=loss.name, build_strategy=build_strategy) - batch_size = 6 - x = np.random.random(size=(batch_size, 1)).astype('float32') - # 1) 设置 return_merged 参数为False以获取不合并的计算结果: - unmerged_prediction, = exe.run(binary, feed={'X': x}, - fetch_list=[prediction.name], - return_merged=False) - # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(2, 3, class_dim)。 - # 输出结果中第一个维度值代表所使用的GPU卡数,而第二个维度值代表batch_size和所使用 - # 的GPU卡数之商。 - print("The 
unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape)) - print(unmerged_prediction) - # 2) 设置 return_merged 参数为True以获取合并的计算结果: - merged_prediction, = exe.run(binary, feed={'X': x}, - fetch_list=[prediction.name], - return_merged=True) - # 如果用户使用两个GPU卡来运行此python代码示例,输出结果将为(6, class_dim)。输出结果 - # 中第一个维度值代表batch_size值。 - print("The merged prediction shape: {}".format(np.array(merged_prediction).shape)) - print(merged_prediction) - # 输出: - # The unmerged prediction shape: (2, 3, 2) - # [array([[-0.37620035, -0.19752218], - # [-0.3561043 , -0.18697084], - # [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354], - # [-0.49041364, -0.25748932], - # [-0.44331917, -0.23276259]], dtype=float32)] - # The merged prediction shape: (6, 2) - # [[-0.37789783 -0.19921964] - # [-0.3577645 -0.18863106] - # [-0.24274671 -0.12814042] - # [-0.24635398 -0.13003758] - # [-0.49232286 -0.25939852] - # [-0.44514108 -0.2345845 ]] - - -.. py:method:: infer_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) - -infer_from_dataset的文档与train_from_dataset几乎完全相同,只是在分布式训练中,推进梯度将在infer_from_dataset中禁用。 infer_from_dataset()可以非常容易地用于多线程中的评估。 - -参数: - - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的) - - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None - - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域 - - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为min(Dataset.thread_num, thread),如果thread > 0,默认为0 - - **debug** (bool) – 是否开启debug模式,默认为False - - **fetch_list** (Variable List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为None - - **fetch_info** (String List) – 每个变量的打印信息,默认为None - - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100 - -返回:None - -**示例代码** - -.. 
code-block:: python - - import paddle.fluid as fluid - place = fluid.CPUPlace() # 使用GPU时可设置place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") - y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var([x, y]) - dataset.set_thread(1) - filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] - dataset.set_filelist(filelist) - exe.run(fluid.default_startup_program()) - exe.infer_from_dataset(program=fluid.default_main_program(),dataset=dataset) - - -.. py:method:: train_from_dataset(program=None, dataset=None, scope=None, thread=0, debug=False, fetch_list=None, fetch_info=None, print_period=100) - -从预定义的数据集中训练。 数据集在paddle.fluid.dataset中定义。 给定程序(或编译程序),train_from_dataset将使用数据集中的所有数据样本。 输入范围可由用户给出。 默认情况下,范围是global_scope()。训练中的线程总数是thread。 训练中使用的线程数将是数据集中threadnum的最小值,同时也是此接口中线程的值。 可以设置debug,以便执行器显示所有算子的运行时间和当前训练任务的吞吐量。 - -注意:train_from_dataset将销毁每次运行在executor中创建的所有资源。 - -参数: - - **program** (Program|CompiledProgram) – 需要执行的program,如果没有给定那么默认使用default_main_program (未编译的) - - **dataset** (paddle.fluid.Dataset) – 在此函数外创建的数据集,用户应当在调用函数前提供完整定义的数据集。必要时请检查Dataset文件。默认为None - - **scope** (Scope) – 执行这个program的域,用户可以指定不同的域。默认为全局域 - - **thread** (int) – 用户想要在这个函数中运行的线程数量。线程的实际数量为min(Dataset.thread_num, thread),如果thread > 0,默认为0 - - **debug** (bool) – 是否开启debug模式,默认为False - - **fetch_list** (Variable List) – 返回变量列表,每个变量都会在训练过程中被打印出来,默认为None - - **fetch_info** (String List) – 每个变量的打印信息,默认为None - - **print_period** (int) – 每两次打印之间间隔的mini-batches的数量,默认为100 - -返回:None - -**示例代码** - -.. 
code-block:: python - - import paddle.fluid as fluid - - place = fluid.CPUPlace() # 通过设置place = fluid.CUDAPlace(0)使用GPU - exe = fluid.Executor(place) - x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64") - y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1) - dataset = fluid.DatasetFactory().create_dataset() - dataset.set_use_var([x, y]) - dataset.set_thread(1) - filelist = [] # 您可以设置您自己的filelist,如filelist = ["dataA.txt"] - dataset.set_filelist(filelist) - exe.run(fluid.default_startup_program()) - exe.train_from_dataset(program=fluid.default_main_program(), - dataset=dataset) diff --git a/doc/paddle/api/paddle/fluid/ParallelExecutor_cn.rst b/doc/paddle/api/paddle/fluid/ParallelExecutor_cn.rst deleted file mode 100644 index 8e391956e..000000000 --- a/doc/paddle/api/paddle/fluid/ParallelExecutor_cn.rst +++ /dev/null @@ -1,196 +0,0 @@ -.. _cn_api_fluid_ParallelExecutor: - -ParallelExecutor -------------------------------- - - -.. py:class:: paddle.fluid.ParallelExecutor(use_cuda, loss_name=None, main_program=None, share_vars_from=None, exec_strategy=None, build_strategy=None, num_trainers=1, trainer_id=0, scope=None) - -:api_attr: 声明式编程模式(静态图) - - - -``ParallelExecutor`` 是 ``Executor`` 的一个升级版本,可以支持基于数据并行的多节点模型训练和测试。如果采用数据并行模式, ``ParallelExecutor`` 在构造时会将参数分发到不同的节点上,并将输入的 ``Program`` 拷贝到不同的节点,在执行过程中,各个节点独立运行模型,将模型反向计算得到的参数梯度在多个节点之间进行聚合,之后各个节点独立的进行参数的更新。如果使用GPU运行模型,即 ``use_cuda=True`` ,节点指代GPU, ``ParallelExecutor`` 将自动获取在当前机器上可用的GPU资源,用户也可以通过在环境变量设置可用的GPU资源,例如:希望使用GPU0、GPU1计算,export CUDA_VISIBLEDEVICES=0,1;如果在CPU上进行操作,即 ``use_cuda=False`` ,节点指代CPU,**注意:此时需要用户在环境变量中手动添加 CPU_NUM ,并将该值设置为CPU设备的个数,例如:export CPU_NUM=4,如果没有设置该环境变量,执行器会在环境变量中添加该变量,并将其值设为1**。 - -参数: - - **use_cuda** (bool) – 该参数表示是否使用GPU执行。 - - **loss_name** (str) - 该参数为模型最后得到的损失变量的名字。**注意:如果是数据并行模型训练,必须设置loss_name,否则计算结果可能会有问题。** 默认为:None。 - - **main_program** (Program) – 需要被执行的Program 。如果未提供该参数,即该参数为None,在该接口内,main_program将被设置为fluid.default_main_program()。 默认为:None。 - - 
**share_vars_from** (ParallelExecutor) - 如果设置了share_vars_from,当前的ParallelExecutor将与share_vars_from指定的ParallelExecutor共享参数值。需要设置该参数的情况:模型训练过程中需要进行模型测试,并且训练和测试都是采用数据并行模式,那么测试对应的ParallelExecutor在调用with_data_parallel时,需要将share_vars_from设置为训练所对应的ParallelExecutor。由于ParallelExecutor只有在第一次执行时才会将参数变量分发到其他设备上,因此share_vars_from指定的ParallelExecutor必须在当前ParallelExecutor之前运行。默认为:None。 - - **exec_strategy** (ExecutionStrategy) - 通过exec_strategy指定执行计算图过程可以调整的选项,例如线程池大小等。 关于exec_strategy更多信息,请参阅 ``fluid.ExecutionStrategy`` 。 默认为:None。 - - **build_strategy** (BuildStrategy): 通过配置build_strategy,对计算图进行转换和优化,例如:计算图中算子融合、计算图执行过程中开启内存/显存优化等。关于build_strategy更多的信息,请参阅 ``fluid.BuildStrategy`` 。 默认为:None。 - - **num_trainers** (int) – 进行GPU分布式训练时需要设置该参数。如果该参数值大于1,NCCL将会通过多层级节点的方式来初始化。每个节点应有相同的GPU数目。默认为:1。 - - **trainer_id** (int) – 进行GPU分布式训练时需要设置该参数。该参数必须与num_trainers参数同时使用。trainer_id指明是当前所在节点的 “rank”(层级)。trainer_id从0开始计数。默认为:0。 - - **scope** (Scope) – 指定执行Program所在的作用域。默认为:fluid.global_scope()。 - -返回:初始化后的 ``ParallelExecutor`` 对象 - -返回类型:ParallelExecutor - -抛出异常:``TypeError`` - - 如果提供的参数 ``share_vars_from`` 不是 ``ParallelExecutor`` 类型的,将会抛出此异常。 - -.. note:: - 1. 如果只是进行多卡测试,不需要设置loss_name以及share_vars_from。 - 2. 如果程序中既有模型训练又有模型测试,则构建模型测试所对应的ParallelExecutor时必须设置share_vars_from,否则模型测试和模型训练所使用的参数是不一致。 - -**示例代码** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy - import os - - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则fluid会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - exe = fluid.Executor(place) - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - test_program = fluid.default_main_program().clone(for_test=True) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(startup_program) - - train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - # 注意:如果此处不设置share_vars_from=train_exe,测试过程中用的参数与训练使用的参数是不一致 - test_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=test_program, - share_vars_from=train_exe) - - train_data = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = train_exe.run(feed={"X": train_data}, - fetch_list=[loss.name]) - - test_data = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = test_exe.run(feed={"X": test_data}, - fetch_list=[loss.name]) - -.. 
py:method:: run(fetch_list, feed=None, feed_dict=None, return_numpy=True) - -该接口用于运行当前模型,需要注意的是,执行器会执行Program中的所有算子,而不会根据fetch_list对Program中的算子进行裁剪。 - -参数: - - **fetch_list** (list) – 该变量表示模型运行之后需要返回的变量。 - - **feed** (list|dict) – 该变量表示模型的输入变量。如果该参数类型为 ``dict`` ,feed中的数据将会被分割(split)并分送给多个设备(CPU/GPU);如果该参数类型为 ``list`` ,则列表中的各个元素都会直接分别被拷贝到各设备中。默认为:None。 - - **feed_dict** – 该参数已经停止使用。默认为:None。 - - **return_numpy** (bool) – 该变量表示是否将fetched tensor转换为numpy。默认为:True。 - -返回:返回fetch_list中指定的变量值 - -返回类型:List - -抛出异常: - - ``ValueError`` - 如果feed参数是list类型,但是它的长度不等于可用设备(执行场所)的数目,再或者给定的feed不是dict类型,抛出此异常 - - ``TypeError`` - 如果feed参数是list类型,但是它里面的元素不是dict类型时,抛出此异常 - -.. note:: - 1. 如果feed参数为dict类型,输入数据将被均匀分配到不同的卡上,例如:使用2块GPU训练,输入样本数为3,即[0, 1, 2],经过拆分之后,GPU0上的样本数为1,即[0],GPU1上的样本数为2,即[1, 2]。如果样本数少于设备数,程序会报错,因此运行模型时,应额外注意数据集的最后一个batch的样本数是否少于当前可用的CPU核数或GPU卡数,如果是少于,建议丢弃该batch。 - 2. 如果可用的CPU核数或GPU卡数大于1,则fetch出来的结果为不同设备上的相同变量值(fetch_list中的变量)在第0维拼接在一起。 - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - import os - - use_cuda = True - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则fluid会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - exe = fluid.Executor(place) - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - exe.run(startup_program) - - train_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - # 如果feed参数是dict类型: - # 图像会被split到设备中。假设有两个设备,那么每个设备将会处理形为 (5, 1)的图像 - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = train_exe.run(feed={"X": x}, - 
fetch_list=[loss.name]) - - # 如果feed参数是list类型: - # 各设备挨个处理列表中的每个元素 - # 第一个设备处理形为 (10, 1) 的图像 - # 第二个设备处理形为 (9, 1) 的图像 - # - # 使用 exe.device_count 得到设备数目 - x1 = numpy.random.random(size=(10, 1)).astype('float32') - x2 = numpy.random.random(size=(9, 1)).astype('float32') - loss_data, = train_exe.run(feed=[{"X": x1}, {"X": x2}], - fetch_list=[loss.name]) - -.. py:method:: drop_local_exe_scopes() - -立即清除scope中的临时变量。模型运行过程中,生成的中间临时变量将被放到local execution scope中,为了避免对临时变量频繁的申请与释放,ParallelExecutor中采取的策略是间隔若干次迭代之后清理一次临时变量。ParallelExecutor在ExecutionStrategy中提供了num_iteration_per_drop_scope选项,该选项表示间隔多少次迭代之后清理一次临时变量。如果num_iteration_per_drop_scope值为100,但是希望在迭代50次之后清理一次临时变量,可以通过手动调用该接口。 - -返回:无 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - import os - - use_cuda = True - # 注意:如果你使用CPU运行程序,需要具体设置CPU_NUM, - # 否则fluid会把逻辑核的所有数目设为CPU_NUM, - # 在这种情况下,输入的batch size应大于CPU_NUM, - # 否则程序会异常中断。 - if not use_cuda: - os.environ['CPU_NUM'] = str(2) - - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): - data = fluid.layers.data(name='X', shape=[1], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(startup_program) - - parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda, - main_program=train_program, - loss_name=loss.name) - - x = numpy.random.random(size=(10, 1)).astype('float32') - loss_data, = parallel_exe.run(feed={"X": x}, - fetch_list=[loss.name]) - - parallel_exe.drop_local_exe_scopes() diff --git a/doc/paddle/api/paddle/fluid/Program_cn.rst b/doc/paddle/api/paddle/fluid/Program_cn.rst deleted file mode 100644 index 2a611c7b0..000000000 --- a/doc/paddle/api/paddle/fluid/Program_cn.rst +++ /dev/null @@ -1,450 +0,0 @@ -.. _cn_api_fluid_Program: - -Program -------------------------------- - -.. 
py:class:: paddle.fluid.Program - - - - -**注意:默认情况下,Paddle Fluid内部默认含有** :ref:`cn_api_fluid_default_startup_program` **和** :ref:`cn_api_fluid_default_main_program` **,它们共享参数。** :ref:`cn_api_fluid_default_startup_program` **只运行一次来初始化参数,** :ref:`cn_api_fluid_default_main_program` **在每个mini batch中运行并更新权重。** - -Program是Paddle Fluid对于计算图的一种静态描述,使用Program的构造函数可以创建一个Program。Program中包括至少一个 :ref:`api_guide_Block` ,当 :ref:`api_guide_Block` 中存在条件选择的控制流OP(例如 :ref:`cn_api_fluid_layers_While` 等)时,该Program将会含有嵌套着的 :ref:`api_guide_Block` 即控制流外部的 :ref:`api_guide_Block` 将包含着控制流内部的 :ref:`api_guide_Block` ,而嵌套的 :ref:`api_guide_Block` 的元素访问控制将由具体的控制流OP来决定。关于Program具体的结构和包含的类型请参阅 `framework.proto `_ -。 - -一个Program的集合通常包含初始化程序(startup_program)与主程序(main_program),初始化程序是一个包含一些初始化工作的Program,主程序将会包含用来训练的网络结构和变量,在使用同一个 :ref:`api_guide_executor` 执行时他们会共享初始化工作的结果,例如初始化的参数。一个Program的集合可以被用来测试或者训练,被用来训练时, ``Paddle Fluid`` 将会利用所有用户使用的OP和变量来搭建一个训练网络,被用来测试时, 可以通过调用Program相关的接口例如:`clone` 剪去一些与测试无关的OP和变量,比如反向传播的OP和变量。 - - -返回:创建的空的Program - -返回值类型:Program - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program=main_program, startup_program=startup_program): - x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32') - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32') - z = fluid.layers.fc(name="fc", input=x, size=10, act="relu") - - # start_up program here will share fc's weight with main program - print("main program is: {}".format(main_program)) - - print("start up program is: {}".format(startup_program)) - - -.. 
py:method:: to_string(throw_on_error, with_details=False) - -将Program转换为字符串 - -参数: - - **throw_on_error** (bool) - 是否在没有设置必需字段时抛出异常。 - - **with_details** (bool) - 值为true时,打印更多关于变量和参数的信息,如trainable, optimize_attr等 - -返回: 将Program转换为字符串 - -返回类型: str - -抛出异常: ``ValueError`` - 当 ``throw_on_error == true`` ,当没有设置任何必需的字段时,抛出 ``ValueError`` 。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - x = fluid.layers.data(name="X", shape=[2,3], dtype="float32", append_batch_size=False) - pred = fluid.layers.fc(x, size=3) - prog_string = prog.to_string(throw_on_error=True, with_details=False) - prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True) - print("program string without detail: {}".format(prog_string)) - print("program string with detail: {}".format(prog_string_with_details)) - -.. py:method:: clone(for_test=False) - -**注意:** - **1.** ``Program.clone()`` **方法不会克隆例如** :ref:`cn_api_fluid_io_DataLoader` **这样的数据读取相关的部分,这可能会造成的数据读取部分在克隆后丢失** - - **2. 
此API当** ``for_test=True`` **时将会裁剪部分OP和变量。为防止错误的裁剪,推荐在** :ref:`cn_api_fluid_backward_append_backward` **和执行优化器之前使用** ``clone(for_test=True)`` 。 - - -当 ``for_test=True`` 时创建一个新的、仅包含当前Program前向内容的Program。否则创建一个新的,和当前Program完全相同的Program - -有些OP,在训练和测试之间的行为是不同的,比如 :ref:`cn_api_fluid_layers_batch_norm` 。它们有一个属性 ``is_test`` 来控制行为。当 ``for_test=True`` 时,此方法将把它们的 ``is_test`` 属性更改为True。 - -- 克隆Program用于训练时,将 ``for_test`` 设置为False。 -- 克隆Program用于测试时,将 ``for_test`` 设置为True。虽然在这种情况下,如果在使用了优化器之后调用 ``clone`` 我们依旧会对Program当中反向执行以及优化器相关的内容进行自动裁剪,但是,我们强烈建议在使用优化器之前使用 ``clone`` 例如如果使用的是 :ref:`cn_api_fluid_optimizer_Momentum` 可以这样去使用: - -**代码示例** - - :: - - import paddle.fluid as fluid - img = fluid.layers.data(name='image', shape=[784]) - pred = fluid.layers.fc(input=img, size=10, act='relu') - loss = fluid.layers.mean(pred) - ## 我们推荐在使用 Optimizer前使用clone()接口 - test_program = fluid.default_main_program().clone(for_test=True) - optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) - optimizer.minimize(loss) - -参数: - - **for_test** (bool) – 取值为True时,clone方法内部会把operator的属性 ``is_test`` 设置为 True, 并裁剪反向OP和参数优化OP,默认值为False - -返回:当 ``for_test=True`` 时返回一个新的、仅包含当前Program前向内容的Program。否则返回一个新的,和当前Program完全相同的Program - -返回类型: Program - -**代码示例** - -注意,Program在clone后的顺序可能不同,这不会影响的训练或测试进程。在下面的示例中,我们提供了一个简单的方法print_prog(Program)来打印程序描述,以确保clone后仍能得到同样的打印结果: - -.. code-block:: python - - import paddle.fluid as fluid - import six - - - def print_prog(prog): - for name, value in sorted(six.iteritems(prog.block(0).vars)): - print(value) - for op in prog.block(0).ops: - print("op type is {}".format(op.type)) - print("op inputs are {}".format(op.input_arg_names)) - print("op outputs are {}".format(op.output_arg_names)) - for key, value in sorted(six.iteritems(op.all_attrs())): - if key not in ['op_callstack', 'op_role_var']: - print(" [ attrs: {}: {} ]".format(key, value)) - -1.克隆一个Program,示例代码如下。 - -.. 
code-block:: python - - import paddle.fluid as fluid - import six - - def print_prog(prog): - for name, value in sorted(six.iteritems(prog.block(0).vars)): - print(value) - for op in prog.block(0).ops: - print("op type is {}".format(op.type)) - print("op inputs are {}".format(op.input_arg_names)) - print("op outputs are {}".format(op.output_arg_names)) - for key, value in sorted(six.iteritems(op.all_attrs())): - if key not in ['op_callstack', 'op_role_var']: - print(" [ attrs: {}: {} ]".format(key, value)) - - train_program = fluid.Program() - startup_program = fluid.Program() - - # ``startup_program`` 被用来执行一些参数初始化工作 - # ``main_program`` 被用来容纳网络 - with fluid.program_guard(train_program, startup_program): - with fluid.unique_name.guard(): - img = fluid.layers.data(name='image', shape=[784]) - hidden = fluid.layers.fc(input=img, size=200, act='relu') - hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) - loss = fluid.layers.cross_entropy( - input=fluid.layers.fc(hidden, size=10, act='softmax'), - label=fluid.layers.data(name='label', shape=[1], dtype='int64')) - avg_loss = fluid.layers.mean(loss) - test_program = train_program.clone(for_test=True) - print_prog(test_program) - - # 由于需要使训练和测试参数共享,我们需要使用训练的 ``startup_program`` - # 来代替测试用的 ``startup_program``, 尽管测试的 ``startup_program`` 里面什么也没有。 - - # 在Paddle Fluid中我们会通过同样的变量名来共享权重. - # 训练和测试程序的所有参数将会拥有同样的名字,这将会使训练和测试程序实现参数的共享, - # 所以我们使用训练程序的 ``startup_program`` .并且由于测试的 ``startup_program`` 什么也没有, - # 因此它是一个新的程序. - with fluid.program_guard(train_program, startup_program): - with fluid.unique_name.guard(): - sgd = fluid.optimizer.SGD(learning_rate=1e-3) - sgd.minimize(avg_loss) - -2.如果分别运行 train Program 和 test Program,则可以不使用clone。 - -.. 
code-block:: python - - import paddle.fluid as fluid - import six - - def print_prog(prog): - for name, value in sorted(six.iteritems(prog.block(0).vars)): - print(value) - for op in prog.block(0).ops: - print("op type is {}".format(op.type)) - print("op inputs are {}".format(op.input_arg_names)) - print("op outputs are {}".format(op.output_arg_names)) - for key, value in sorted(six.iteritems(op.all_attrs())): - if key not in ['op_callstack', 'op_role_var']: - print(" [ attrs: {}: {} ]".format(key, value)) - - def network(): - img = fluid.layers.data(name='image', shape=[784]) - hidden = fluid.layers.fc(input=img, size=200, act='relu') - hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) - loss = fluid.layers.cross_entropy( - input=fluid.layers.fc(hidden, size=10, act='softmax'), - label=fluid.layers.data(name='label', shape=[1], dtype='int64')) - avg_loss = fluid.layers.mean(loss) - return avg_loss - - train_program_2 = fluid.Program() - startup_program_2 = fluid.Program() - test_program_2 = fluid.Program() - with fluid.program_guard(train_program_2, startup_program_2): - with fluid.unique_name.guard(): - avg_loss = network() - sgd = fluid.optimizer.SGD(learning_rate=1e-3) - sgd.minimize(avg_loss) - # 不使用测试阶段的启动程序 - with fluid.program_guard(test_program_2, startup_program_2): - with fluid.unique_name.guard(): - avg_loss = network() - print_prog(test_program_2) - -上边两个代码片段生成和打印的Program是一样的。 - -.. py:staticmethod:: parse_from_string(binary_str) - -通过对 `protobuf `_ 的反序列化,转换成Program - - -参数: - - **binary_str_type** (str) – `protobuf `_ 二进制字符串 - -返回:反序列化后的 Program - -返回类型:Program - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - startup_prog = fluid.Program() - main_prog = fluid.Program() - with fluid.program_guard(startup_prog, main_prog): - x = fluid.layers.data( - name='X', shape=[1000, 784], dtype='float32', append_batch_size=False) - - y = fluid.layers.data( - name='Y', shape=[784, 100], dtype='float32', append_batch_size=False) - - z = fluid.layers.mul(x=x, y=y) - - binary_str = fluid.default_main_program().desc.serialize_to_string() - prog_restored = fluid.default_main_program().parse_from_string(binary_str) - - print(fluid.default_main_program()) - print(prog_restored) - - # 这里打印出的两个Program应该是一模一样的 - -.. py:attribute:: num_blocks - -该Program中的 :ref:`api_guide_Block` 的个数 - -返回: 该Program中的 :ref:`api_guide_Block` 的个数 - -返回类型:int - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - num_blocks = prog.num_blocks - print(num_blocks) - - ## 1 - ## 当前Program中只有一个Block,即全局的Block - -.. py:attribute:: random_seed - -**注意:必须在相关OP被添加之前设置。** - -程序中随机运算符的默认随机种子。0意味着随机生成随机种子。 - -返回:该Program中当前正在使用的random seed - -返回类型:int64 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - random_seed = prog.random_seed - x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False) - print(random_seed) - ## 0 - ## 默认的random seed是 0 - - # 这里我们必须要在fluid.layers.dropout之前设置random_seed - prog.random_seed = 1 - z_var = fluid.layers.dropout(x_var, 0.7) - - print(prog.random_seed) - ## 1 - ## 修改后random seed变成了 1 - -.. py:method:: global_block() - -获取该Program的第一个 :ref:`api_guide_Block` 。 - -返回:该Program的第一个 :ref:`api_guide_Block` - -返回类型::ref:`api_guide_Block` - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - gb_block = prog.global_block() - print(gb_block) - ## - ## idx: 0 - ## parent_idx: -1 - ## 打印出了当前全局Block的描述 - -.. 
py:method:: block(index) - -返回该Program中 , ``index`` 指定的 :ref:`api_guide_Block` 。 ``index`` 类型为int - -参数: - - **index** (int) - 需要获取的 :ref:`api_guide_Block` 的index - -返回: 该Program中index对应的那个 :ref:`api_guide_Block` - -返回类型: :ref:`api_guide_Block` - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - block_0 = prog.block(0) - print(block_0) - ## - ## idx: 0 - ## parent_idx: -1 - ## 打印出了0号Block的描述 - -.. py:method:: current_block() - -获取当前 :ref:`api_guide_Block` 。当前 :ref:`api_guide_Block` 是用来添加OP的。 - -返回: 该Program中用户当前所在的 :ref:`api_guide_Block` - -返回类型: :ref:`api_guide_Block` - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - current_blk = prog.current_block() - print(current_blk) - ## - ## idx: 0 - ## parent_idx: -1 - ## 打印出了当前Block的描述 - -.. py:method:: list_vars() - -获取当前Program中所有变量。返回值是一个可迭代对象(iterable object)。 - -返回: Generator 会yield每个Program中的变量 - -返回类型: iterable 的 :ref:`api_guide_Variable` - - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - - prog = fluid.default_main_program() - img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32') - label = fluid.layers.data(name='label', shape=[128,1], dtype='int64') - for var in prog.list_vars(): - print(var) - - # 这里将会打印出当前Program中所有的Variable - -.. py:method:: all_parameters() - -获取当前Program中所有的 :ref:`api_guide_parameter` 。返回值是一个列表。 - -返回: 一个包含当前Program中所有参数的列表。 - -返回类型: list[ :ref:`api_guide_parameter` ] - - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - program = fluid.default_main_program() - data = fluid.data(name='x', shape=[None, 13], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10) - loss = fluid.layers.mean(hidden) - fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) - - for param in program.all_parameters(): - print(param) - - # 这里将会打印出当前Program中所有的Parameters,在本例中,输出结果是: - # - # name: "fc_0.w_0" - # type { - # type: LOD_TENSOR - # lod_tensor { - # tensor { - # data_type: FP32 - # dims: 13 - # dims: 10 - # } - # } - # } - # - # persistable: true - # name: "fc_0.b_0" - # type { - # type: LOD_TENSOR - # lod_tensor { - # tensor { - # data_type: FP32 - # dims: 10 - # } - # } - # } - # persistable: true - # - # 这里print(param)将会打印出一个参数所有的属性,包括name,type和persistable, - # 你可以访问一个参数的指定属性,例如param.name,param.type \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/WeightNormParamAttr_cn.rst b/doc/paddle/api/paddle/fluid/WeightNormParamAttr_cn.rst deleted file mode 100644 index d17b0380c..000000000 --- a/doc/paddle/api/paddle/fluid/WeightNormParamAttr_cn.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _cn_api_fluid_WeightNormParamAttr: - -WeightNormParamAttr -------------------------------- - - -.. py:class:: paddle.fluid.WeightNormParamAttr(dim=None, name=None, initializer=None, learning_rate=1.0, regularizer=None, trainable=True, do_model_average=False) - -:api_attr: 声明式编程模式(静态图) - - - -.. 
note:: - 该类中的 ``gradient_clip`` 属性在2.0版本会废弃,推荐在初始化 ``optimizer`` 时设置梯度裁剪。共有三种裁剪策略: :ref:`cn_api_fluid_clip_GradientClipByGlobalNorm` 、 - :ref:`cn_api_fluid_clip_GradientClipByNorm` 、 :ref:`cn_api_fluid_clip_GradientClipByValue` 。 - -该类定义了权重归一化(Weight Normalization)的参数。权重归一化可以将神经网络中权重向量的长度与其方向解耦,详细的定义与实现可以参考论文:`Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks `_ - -参数: - - **dim** (int) - 进行归一化操作(norm)的切片所在维度,是小于权重Tensor rank的非负数。比如卷积的权重shape是 :math:`[cout, cin, kh, kw]` , rank是4,则dim可以选0,1,2,3;fc的权重shape是 :math:`[cout, cin]` ,rank是2,dim可以选0,1。 dim 默认为None,如果为None就对所有元素做归一化(norm)。 - - **name** (None|str) - 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认为None。 - - **initializer** (Initializer) - 初始化参数方法,例如 ``initializer = fluid.initializer.ConstantInitializer(1.0)`` 。默认为None,如果为None则使用默认初始化函数 `Xavier()` 。 - - **learning_rate** (float32) - 学习率,优化过程 :math:`global\_lr∗parameter\_lr∗scheduler\_factor` 的学习速率,默认为1.0。 - - **regularizer** (WeightDecayRegularizer,可选) - 正则化方法。支持两种正则化策略: :ref:`cn_api_fluid_regularizer_L1Decay` 、 - :ref:`cn_api_fluid_regularizer_L2Decay` ,如果在 ``optimizer`` (例如 :ref:`cn_api_fluid_optimizer_SGDOptimizer` ) 中也 - 设置了正则化,``optimizer`` 中的正则化将被忽略。默认值为None,表示没有正则化。 - - **trainable** (bool) - 可选,指明参数是否可训练,默认为True。 - - **do_model_average** (bool) - 可选,指明参数是否需要模型平均化操作(Model Average),默认为False。 - - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") - fc = fluid.layers.fc(input=data, - size=1000, - param_attr=fluid.WeightNormParamAttr( - dim=None, - name='weight_norm_param', - initializer=fluid.initializer.ConstantInitializer(1.0), - learning_rate=1.0, - regularizer=fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.1), - trainable=True, - do_model_average=False)) - - - diff --git a/doc/paddle/api/paddle/fluid/default_main_program_cn.rst b/doc/paddle/api/paddle/fluid/default_main_program_cn.rst deleted file mode 100644 index 4759fafea..000000000 --- a/doc/paddle/api/paddle/fluid/default_main_program_cn.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _cn_api_fluid_default_main_program: - -default_main_program -------------------------------- - -.. py:function:: paddle.fluid.default_main_program() - - - - - -此接口可以获取当前用于存储op和variable描述信息的 ``default main program`` - -``fluid.layers`` 接口中添加的op和variable会存储在 ``default main program`` 中 - -``default main program`` 是fluid的许多编程接口中Program参数的默认值。例如对于 ``Executor.run()`` 如果用户没有传入Program参数,会默认使用 ``default main program`` - -可以使用 :ref:`cn_api_fluid_program_guard` 来替换 ``default main program`` - -参数: - - 无 - -返回: 当前默认用于存储op和variable描述的Program - -返回类型: :ref:`cn_api_fluid_Program` - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - - #示例网络: - data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') - - conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None) - bn1 = fluid.layers.batch_norm(conv1, act='relu') - pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2) - conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None) - bn2 = fluid.layers.batch_norm(conv2, act='relu') - pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2) - - fc1 = fluid.layers.fc(pool2, size=50, act='relu') - fc2 = fluid.layers.fc(fc1, size=102, act='softmax') - - loss = fluid.layers.cross_entropy(input=fc2, label=label) - loss = fluid.layers.mean(loss) - opt = fluid.optimizer.Momentum( - learning_rate=0.1, - momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) - opt.minimize(loss) - - print(fluid.default_main_program().num_blocks) - print(fluid.default_main_program().blocks[0].var('image')) - - - diff --git a/doc/paddle/api/paddle/fluid/default_startup_program_cn.rst b/doc/paddle/api/paddle/fluid/default_startup_program_cn.rst deleted file mode 100644 index bfc247c29..000000000 --- a/doc/paddle/api/paddle/fluid/default_startup_program_cn.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _cn_api_fluid_default_startup_program: - - - - -default_startup_program -------------------------------- - -.. py:function:: paddle.fluid.default_startup_program() - - - - - - -该函数可以获取默认/全局 startup :ref:`cn_api_fluid_Program` (初始化启动程序)。 - - :ref:`_cn_api_fluid_layers` 中的函数会新建参数或 :ref:`cn_api_paddle_data_reader_reader` (读取器) 或 `NCCL `_ 句柄作为全局变量。 - -startup_program会使用内在的OP(算子)去初始化他们,并由 :ref:`_cn_api_fluid_layers` 中的函数将这些OP追加到startup :ref:`cn_api_fluid_Program` 中。 - -该函数将返回默认的或当前的startup_program。用户可以使用 :ref:`cn_api_fluid_program_guard` 去切换 :ref:`cn_api_fluid_default_startup_program` 。 - -返回: 当前的默认/全局 初始化 :ref:`cn_api_fluid_Program` - -返回类型: :ref:`cn_api_fluid_Program` - -**代码示例:** - -.. 
code-block:: python - - import paddle.fluid as fluid - - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program=main_program, startup_program=startup_program): - x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32') - y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32') - z = fluid.layers.fc(name="fc", input=x, size=10, act="relu") - - print("main program is: {}".format(fluid.default_main_program())) - print("start up program is: {}".format(fluid.default_startup_program())) - - - diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst deleted file mode 100644 index 59dc22b7b..000000000 --- a/doc/paddle/api/paddle/fluid/dygraph/base/disable_dygraph_cn.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _cn_api_fluid_disable_dygraph: - -disable_dygraph -------------------------------- - -.. py:function:: paddle.fluid.disable_dygraph() - -该接口关闭动态图模式。 - -返回:无 - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - fluid.enable_dygraph() # Now we are in dygraph mode - print(fluid.in_dygraph_mode()) # True - fluid.disable_dygraph() - print(fluid.in_dygraph_mode()) # False - diff --git a/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst b/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst deleted file mode 100644 index 0df485bd8..000000000 --- a/doc/paddle/api/paddle/fluid/dygraph/base/enable_dygraph_cn.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. _cn_api_fluid_enable_dygraph: - -enable_dygraph -------------------------------- - -.. py:function:: paddle.fluid.enable_dygraph(place=None) - -该接口打开动态图模式。 - -参数: - - **place** (fluid.CPUPlace 或 fluid.CUDAPlace,可选) - 执行动态图的设备数目。若为None,则设备根据paddle的编译方式决定。默认值为 ``None``。 - -返回:无 - -**示例代码** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - fluid.enable_dygraph() # Now we are in dygraph mode - print(fluid.in_dygraph_mode()) # True - fluid.disable_dygraph() - print(fluid.in_dygraph_mode()) # False - diff --git a/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst b/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst deleted file mode 100644 index 06c960ce5..000000000 --- a/doc/paddle/api/paddle/fluid/framework/in_dygraph_mode_cn.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. _cn_api_fluid_in_dygraph_mode: - -in_dygraph_mode -------------------------------- - -.. py:function:: paddle.fluid.in_dygraph_mode() - - - - -该接口检查程序是否在动态图模式中运行。 -可以通过 ``fluid.dygraph.guard`` 接口开启动态图模式。 - -返回:如果程序是在动态图模式下运行的,则返回 ``True``。 - -返回类型:bool - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - - fluid.enable_dygraph() # 现在进入 dygragh 模式 - print(fluid.in_dygraph_mode()) # True - fluid.disable_dygraph() - print(fluid.in_dygraph_mode()) # False - - diff --git a/doc/paddle/api/paddle/fluid/global_scope_cn.rst b/doc/paddle/api/paddle/fluid/global_scope_cn.rst deleted file mode 100644 index 277bb23db..000000000 --- a/doc/paddle/api/paddle/fluid/global_scope_cn.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _cn_api_fluid_executor_global_scope: - -global_scope -------------------------------- - - -.. py:function:: paddle.fluid.global_scope() - -:api_attr: 声明式编程模式(静态图) - - - -获取全局/默认作用域实例。很多API使用默认 ``global_scope`` ,例如 ``Executor.run`` 等。 - -返回:全局/默认作用域实例 - -返回类型:Scope - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - fluid.global_scope().var("data").get_tensor().set(numpy.ones((1, 2)), fluid.CPUPlace()) - data = numpy.array(fluid.global_scope().find_var("data").get_tensor()) - print(data) # [[1. 
1.]] diff --git a/doc/paddle/api/paddle/fluid/gradients_cn.rst b/doc/paddle/api/paddle/fluid/gradients_cn.rst deleted file mode 100644 index 0165f7f2f..000000000 --- a/doc/paddle/api/paddle/fluid/gradients_cn.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _cn_api_fluid_backward_gradients: - -gradients -------------------------------- - - -.. py:function:: paddle.fluid.backward.gradients(targets, inputs, target_gradients=None, no_grad_set=None) - -:api_attr: 声明式编程模式(静态图) - - - -将目标梯度反向传播到输入。 - -参数: - - **targets** (Variable|list[Variable]) – 目标变量 - - **inputs** (Variable|list[Variable]) – 输入变量 - - **target_gradients** (Variable|list[Variable],可选) – 目标的梯度变量,应与目标变量形状相同;如果设置为None,则以1初始化所有梯度变量 - - **no_grad_set** (set[Variable|str],可选) – 在 `block0` ( :ref:`api_guide_Block` ) 中要忽略梯度的 :ref:`api_guide_Variable` 的名字的集合。所有的 :ref:`api_guide_Block` 中带有 ``stop_gradient = True`` 的所有 :ref:`api_guide_Variable` 的名字都会被自动添加到此集合中。如果该参数不为 ``None``,则会将该参数集合的内容添加到默认的集合中。默认值为 ``None``。 - - -返回:数组,包含与输入对应的梯度。如果一个输入不影响目标函数,则对应的梯度变量为None - -返回类型:(list[Variable]) - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - - x = fluid.data(name='x', shape=[None,2,8,8], dtype='float32') - x.stop_gradient=False - y = fluid.layers.conv2d(x, 4, 1, bias_attr=False) - y = fluid.layers.relu(y) - y = fluid.layers.conv2d(y, 4, 1, bias_attr=False) - y = fluid.layers.relu(y) - z = fluid.gradients([y], x) - print(z) \ No newline at end of file diff --git a/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst deleted file mode 100644 index ce50e67bd..000000000 --- a/doc/paddle/api/paddle/fluid/initializer/Normal_cn.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. _cn_api_fluid_layers_Normal: - -Normal -------------------------------- - -.. py:class:: paddle.fluid.layers.Normal(loc, scale) - - - - -正态分布 - -数学公式: - -.. 
math:: - - pdf(x; \mu, \sigma) = \frac{1}{Z}e^{\frac {-0.5 (x - \mu)^2} {\sigma^2} } - - Z = (2 \pi \sigma^2)^{0.5} - -上面的数学公式中: - -:math:`loc = \mu` : 平均值。 -:math:`scale = \sigma` : 标准差。 -:math:`Z`: 正态分布常量。 - -参数: - - **loc** (float|list|numpy.ndarray|Variable) - 正态分布平均值。数据类型为float32。 - - **scale** (float|list|numpy.ndarray|Variable) - 正态分布标准差。数据类型为float32。 - -**代码示例**: - -.. code-block:: python - - import numpy as np - from paddle.fluid import layers - from paddle.fluid.layers import Normal - - # 定义参数为float的正态分布。 - dist = Normal(loc=0., scale=3.) - # 定义一组有两个数的正态分布。 - # 第一组为均值1,标准差11,第二组为均值2,标准差22。 - dist = Normal(loc=[1., 2.], scale=[11., 22.]) - # 得到3个样本, 返回一个 3 x 2 张量。 - dist.sample([3]) - - # 通过广播的方式,定义一个两个参数的正态分布。 - # 均值都是1,标准差不同。 - dist = Normal(loc=1., scale=[11., 22.]) - - # 一个完整的例子 - value_npdata = np.array([0.8], dtype="float32") - value_tensor = layers.create_tensor(dtype="float32") - layers.assign(value_npdata, value_tensor) - - normal_a = Normal([0.], [1.]) - normal_b = Normal([0.5], [2.]) - - sample = normal_a.sample([2]) - # 一个由定义好的正太分布随机生成的张量,维度为: [2, 1] - entropy = normal_a.entropy() - # [1.4189385] with shape: [1] - lp = normal_a.log_prob(value_tensor) - # [-1.2389386] with shape: [1] - kl = normal_a.kl_divergence(normal_b) - # [0.34939718] with shape: [1] - - -.. py:function:: sample(shape, seed=0) - -生成指定维度的样本 - -参数: - - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 - - **seed** (int) - 长整型数。 - -返回:预先设计好维度的张量, 数据类型为float32 - -返回类型:Variable - -.. py:function:: entropy() - -信息熵 - -返回:正态分布的信息熵, 数据类型为float32 - -返回类型:Variable - -.. py:function:: log_prob(value) - -对数概率密度函数 - -参数: - - **value** (Variable) - 输入张量。数据类型为float32或float64。 - -返回:对数概率, 数据类型与value相同 - -返回类型:Variable - -.. 
py:function:: kl_divergence(other) - -两个正态分布之间的KL散度。 - -参数: - - **other** (Normal) - Normal的实例。 - -返回:两个正态分布之间的KL散度, 数据类型为float32 - -返回类型:Variable - - - - - - diff --git a/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst b/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst deleted file mode 100644 index 59e1544b3..000000000 --- a/doc/paddle/api/paddle/fluid/initializer/Uniform_cn.rst +++ /dev/null @@ -1,106 +0,0 @@ -.. _cn_api_fluid_layers_Uniform: - -Uniform -------------------------------- - -.. py:class:: paddle.fluid.layers.Uniform(low, high) - - - - -均匀分布 - -概率密度函数(pdf)为: - -.. math:: - - pdf(x; a, b) = \frac{1}{Z}, a <=x < b - - Z = b - a - -上面的数学公式中: - -:math:`low = a` 。 -:math:`high = b` 。 -:math:`Z`: 正态分布常量。 - -参数low和high的维度必须能够支持广播。 - -参数: - - **low** (float|list|numpy.ndarray|Variable) - 均匀分布的下边界。数据类型为float32。 - - **high** (float|list|numpy.ndarray|Variable) - 均匀分布的上边界。数据类型为float32。 - -**代码示例**: - -.. code-block:: python - - import numpy as np - from paddle.fluid import layers - from paddle.fluid.layers import Uniform - - # 定义参数为float的均匀分布 - u1 = Uniform(low=3.0, high=4.0) - # 定义参数为list的均匀分布 - u2 = Uniform(low=[1.0, 2.0], - high=[3.0, 4.0]) - # 通过广播的方式,定义一个均匀分布 - u3 = Uniform(low=[[1.0, 2.0], - [3.0, 4.0]], - high=[[1.5, 2.5], - [3.5, 4.5]]) - - # 通过广播的方式,定义一个均匀分布 - u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) - - # 一个完整的例子 - value_npdata = np.array([0.8], dtype="float32") - value_tensor = layers.create_tensor(dtype="float32") - layers.assign(value_npdata, value_tensor) - - uniform = Uniform([0.], [2.]) - - sample = uniform.sample([2]) - # 一个由定义好的均匀分布随机生成的张量,维度为: [2, 1] - entropy = uniform.entropy() - # [0.6931472] with shape: [1] - lp = uniform.log_prob(value_tensor) - # [-0.6931472] with shape: [1] - - -.. py:function:: sample(shape, seed=0) - -生成指定维度的样本 - -参数: - - **shape** (list) - 1维列表,指定生成样本的维度。数据类型为int32。 - - **seed** (int) - 长整型数。 - -返回:预先设计好维度的张量, 数据类型为float32 - -返回类型:Variable - -.. 
py:function:: entropy() - -信息熵 - -返回:均匀分布的信息熵, 数据类型为float32 - -返回类型:Variable - -.. py:function:: log_prob(value) - -对数概率密度函数 - -参数: - - **value** (Variable) - 输入张量。数据类型为float32或float64。 - -返回:对数概率, 数据类型与value相同 - -返回类型:Variable - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/io/load_cn.rst b/doc/paddle/api/paddle/fluid/io/load_cn.rst deleted file mode 100644 index f00197697..000000000 --- a/doc/paddle/api/paddle/fluid/io/load_cn.rst +++ /dev/null @@ -1,168 +0,0 @@ -.. _cn_api_fluid_dygraph_jit_load: - -load ------------------ - -.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) - -:api_attr: 命令式编程模式(动态图) - -将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 - -.. note:: - 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, - 在使用它进行fine-tune训练时会存在一些局限: - 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; - 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; - 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; - 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 - -参数: - - **model_path** (str) - 存储模型的目录。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 - -返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 - -**示例代码** - -1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 - - .. 
code-block:: python - - import numpy as np - import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 1. 训练存储模型. - # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) - # 2. 载入模型 & 预测 - # 载入模型 - infer_net = fluid.dygraph.jit.load(model_path) - # 预测 - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = infer_net(x) - # 3. 
载入模型 & fine-tune训练 - # 载入模型 - train_net = fluid.dygraph.jit.load(model_path) - train_net.train() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # fine-tune训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = train_net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - train_net.clear_gradients() - - -2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 - - .. code-block:: python - - import numpy as np - import paddle.fluid as fluid - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - img = fluid.data(name='img', shape=[None, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') - pred = fluid.layers.fc(input=img, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=pred, label=label) - avg_loss = fluid.layers.mean(loss) - optimizer = fluid.optimizer.SGD(learning_rate=0.001) - optimizer.minimize(avg_loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - loader = fluid.io.DataLoader.from_generator( - feed_list=[img, label], capacity=5, iterable=True) - loader.set_batch_generator(random_batch_reader(), places=place) - # 1. 
训练 & 存储预测模型 - for data in loader(): - exe.run( - fluid.default_main_program(), - feed=data, - fetch_list=[avg_loss]) - model_path = "fc.example.model" - fluid.io.save_inference_model( - model_path, ["img"], [pred], exe) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 2. 载入模型 & 预测 - fc = fluid.dygraph.jit.load(model_path) - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = fc(x) - # 3. 载入模型 & fine-tune训练 - fc = fluid.dygraph.jit.load(model_path) - fc.train() - sgd = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=fc.parameters()) - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator( - random_batch_reader(), places=place) - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = fc(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - sgd.minimize(avg_loss) diff --git a/doc/paddle/api/paddle/fluid/layers/atan_cn.rst b/doc/paddle/api/paddle/fluid/layers/atan_cn.rst deleted file mode 100644 index 382f6b09b..000000000 --- a/doc/paddle/api/paddle/fluid/layers/atan_cn.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _cn_api_tensor_atan: - -atan -------------------------------- - -.. py:function:: paddle.atan(x, name=None, out=None) - -arctanh 激活函数。 - -.. math:: - out = tanh^{-1}(x) - -参数: - - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 - -返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 - -**代码示例**: - -.. 
code-block:: python - - import numpy as np - import paddle - import paddle.fluid as fluid - - inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = paddle.atan(inputs) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) - print(res) - #[array([-0.6858003, 0.45658287, -0.5724284, 0.03708299], dtype=float32)] diff --git a/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst b/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst deleted file mode 100644 index fd9238e64..000000000 --- a/doc/paddle/api/paddle/fluid/layers/cumsum_cn.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _cn_api_fluid_layers_cumsum: - -cumsum -------------------------------- - -.. py:function:: paddle.fluid.layers.cumsum(x,axis=None,exclusive=None,reverse=None) - -:alias_main: paddle.cumsum -:alias: paddle.cumsum,paddle.tensor.cumsum,paddle.tensor.math.cumsum -:old_api: paddle.fluid.layers.cumsum - - - -沿给定轴(axis)的元素的累加和。默认结果的第一个元素和输入的第一个元素一致。如果exlusive为True,结果的第一个元素则为0。 - -参数: - - **x** (Variable) - 累加的输入,需要进行累加操作的变量Tensor/LoDTensor。 - - **axis** (int,可选) - 指明需要累加的维。-1代表最后一维。默认为:-1。 - - **exclusive** (bool,可选) - 是否执行exclusive累加。默认为:False。 - - **reverse** (bool,可选) - 若为True,则以相反顺序执行累加。默认为:False。 - -返回:Variable(Tensor)。是累加的结果,即累加器的输出。 - -返回类型:变量(Variable)。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[32, 784]) - result = fluid.layers.cumsum(data, axis=0) - - - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst deleted file mode 100644 index b36097fbc..000000000 --- a/doc/paddle/api/paddle/fluid/layers/elementwise_max_cn.rst +++ /dev/null @@ -1,109 +0,0 @@ -.. 
_cn_api_fluid_layers_elementwise_max: - -elementwise_max -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_max(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_max -:alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max -:old_api: paddle.fluid.layers.elementwise_max - - -该OP逐元素对比输入的两个多维Tensor,并且把各个位置更大的元素保存到返回结果中。 - -等式是: - -.. math:: - Out = max(X, Y) - -- :math:`X` :多维Tensor。 -- :math:`Y` :多维Tensor。 - -此运算算子有两种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 - - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - -返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 - -返回类型: 多维Tensor。 - -**代码示例 1** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_max(x, y) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - - print(z_value) #[2, 5, 4] - -**代码示例 2** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_max(x, y, axis=1) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - - print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]] - - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst b/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst deleted file mode 100644 index 22669884e..000000000 --- a/doc/paddle/api/paddle/fluid/layers/elementwise_min_cn.rst +++ /dev/null @@ -1,106 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_min: - -elementwise_min -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_min(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_min -:alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min -:old_api: paddle.fluid.layers.elementwise_min - - -该OP逐元素对比输入的两个多维Tensor,并且把各个位置更小的元素保存到返回结果中。 - -等式是: - -.. math:: - Out = min(X, Y) - -- :math:`X` :多维Tensor。 -- :math:`Y` :多维Tensor。 - -此运算算子有两种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 
用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 - - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - -返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 - -返回类型: 多维Tensor。 - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_min(x, y) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - - print(z_value) #[1, 3, 2] - -**代码示例 2** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - - def gen_data(): - return { - "x": np.ones((2, 3, 4, 5)).astype('float32'), - "y": np.zeros((3, 4)).astype('float32') - } - - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_min(x, y, axis=1) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - - print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]] - - - - - diff --git a/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst b/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst deleted file mode 100644 index 3e314f655..000000000 --- a/doc/paddle/api/paddle/fluid/layers/flatten_cn.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. _cn_api_fluid_layers_flatten: - -flatten -------------------------------- - -.. py:function:: paddle.fluid.layers.flatten(x, axis=1, name=None) - -:alias_main: paddle.flatten -:alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten -:old_api: paddle.fluid.layers.flatten - - - -flatten op将输入的多维Tensor展平成2-D Tensor矩阵 - -例如: - -.. 
code-block:: text - - Case 1: - - 给定 - X.shape = (3, 100, 100, 4) - 且 - axis = 2 - 得到: - Out.shape = (3 * 100, 4 * 100) - - Case 2: - - 给定 - X.shape = (3, 100, 100, 4) - 且 - axis = 0 - 得到: - Out.shape = (1, 3 * 100 * 100 * 4) - -参数: - - **x** (Variable) - 一个维度数>=axis 的多维Tensor, 数据类型可以为float32,float64,int8,int32或int64。 - - **axis** (int) - flatten展开的分割轴,[0, axis) 轴数据被flatten到输出矩阵的0轴,[axis, R)数据被flatten到输出矩阵的1轴,其中R是输入张量的总维度数。axis的值必须在[0,R]范围内。当 axis=0 时,若输入Tensor的维度为 :math:`[d_0, d_1,… d_n]` ,则输出张量的Tensor维度为 :math:`[1,d_0 * d_1 *… d_n]` ,默认值为1。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 一个 2-D Tensor,它包含输入Tensor的数据,但维度发生变化。输入的[0, axis)维将沿axis展平到输出Tensor的0维度,剩余的输入维数展平到输出的1维度。数据类型与输入x相同。 - -返回类型: Variable - -抛出异常: - - ValueError: 如果 x 不是一个Variable - - ValueError: 如果axis的范围不在 [0, rank(x)] 范围内 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name="x", shape=[4, 4, 3], append_batch_size=False, dtype="float32") - # x shape is [4, 4, 3] - out = fluid.layers.flatten(x=x, axis=2) - # out shape is [16, 3] - - - diff --git a/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst b/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst deleted file mode 100644 index d139a6a8f..000000000 --- a/doc/paddle/api/paddle/fluid/layers/hard_shrink_cn.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _cn_api_fluid_layers_hard_shrink: - -hard_shrink -------------------------------- - -.. py:function:: paddle.fluid.layers.hard_shrink(x,threshold=None) - -:alias_main: paddle.nn.functional.hard_shrink -:alias: paddle.nn.functional.hard_shrink,paddle.nn.functional.activation.hard_shrink -:old_api: paddle.fluid.layers.hard_shrink - - - -HardShrink激活函数(HardShrink activation operator) - - -.. 
math:: - - out = \begin{cases} - x, \text{if } x > \lambda \\ - x, \text{if } x < -\lambda \\ - 0, \text{otherwise} - \end{cases} - -参数: - - **x** - HardShrink激活函数的输入 - - **threshold** (FLOAT)-HardShrink激活函数的threshold值。[默认:0.5] - -返回:HardShrink激活函数的输出 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[784]) - result = fluid.layers.hard_shrink(x=data, threshold=0.3) - - - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst b/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst deleted file mode 100644 index 0412f85fc..000000000 --- a/doc/paddle/api/paddle/fluid/layers/margin_rank_loss_cn.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. _cn_api_fluid_layers_margin_rank_loss: - -margin_rank_loss -------------------------------- - -.. py:function:: paddle.fluid.layers.margin_rank_loss(label, left, right, margin=0.1, name=None) - -:alias_main: paddle.nn.functional.margin_rank_loss -:alias: paddle.nn.functional.margin_rank_loss,paddle.nn.functional.loss.margin_rank_loss -:old_api: paddle.fluid.layers.margin_rank_loss - - - -margin rank loss(间隔排序损失)层。在排序问题中,它可以比较来自排序网络的输入 ``left`` 和输入 ``right`` 的得分。 - -可用如下等式定义: - -.. math:: - rank\_loss = max(0, -label * (left - right) + margin) - - -参数: - - **label** (Variable) – 表示输入 ``left`` 的真实排序是否高于输入 ``right`` , 数据类型为 float32。 - - **left** (Variable) – 输入 ``left`` 的排序得分, 数据类型为 float32 。 - - **right** (Variable) – 输入 ``right`` 的排序得分, 数据类型为 float32。 - - **margin** (float) – 指定的间隔。 - - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 排序损失 - -返回类型: Variable - -抛出异常: - - ``ValueError`` - ``label`` , ``left`` , ``right`` 有一者不为Variable类型时,抛出此异常 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - label = fluid.layers.data(name="label", shape=[-1, 1], dtype="float32") - left = fluid.layers.data(name="left", shape=[-1, 1], dtype="float32") - right = fluid.layers.data(name="right", shape=[-1, 1], dtype="float32") - out = fluid.layers.margin_rank_loss(label, left, right) - - - - - - - - - - - diff --git a/doc/paddle/api/paddle/fluid/layers/mean_cn.rst b/doc/paddle/api/paddle/fluid/layers/mean_cn.rst deleted file mode 100644 index 69363e1c6..000000000 --- a/doc/paddle/api/paddle/fluid/layers/mean_cn.rst +++ /dev/null @@ -1,52 +0,0 @@ -.. _cn_api_fluid_layers_mean: - -mean -------------------------------- - -.. py:function:: paddle.fluid.layers.mean(x, name=None) - -:alias_main: paddle.mean -:alias: paddle.mean,paddle.tensor.mean,paddle.tensor.stat.mean -:old_api: paddle.fluid.layers.mean - - - -计算 ``x`` 所有元素的平均值。 - -参数: - - **x** (Variable) : Tensor 或 LoDTensor。均值运算的输入。 - - **name** (basestring | None) : 输出变量的名称。 - -返回: - - Variable: 包含输出均值的 Tensor / LoDTensor。 - -返回类型: - - Variable(变量)。 - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - # Graph Organizing - input = fluid.layers.data( - name='data', shape=[2, 3], dtype='float32') - output = fluid.layers.mean(input) - - # Create an executor using CPU as an example - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - # Execute - x_ndarray = numpy.ones([2, 3]).astype(numpy.float32) - res, = exe.run(fluid.default_main_program(), - feed={'data':x_ndarray}, - fetch_list=[output]) - print(res) - ''' - Output Value: - [1.] - ''' diff --git a/doc/paddle/api/paddle/fluid/name_scope_cn.rst b/doc/paddle/api/paddle/fluid/name_scope_cn.rst deleted file mode 100644 index bf17054ba..000000000 --- a/doc/paddle/api/paddle/fluid/name_scope_cn.rst +++ /dev/null @@ -1,53 +0,0 @@ -.. _cn_api_fluid_name_scope: - -name_scope -------------------------------- - - -.. 
py:function:: paddle.fluid.name_scope(prefix=None) - -:api_attr: 声明式编程模式(静态图) - - - - -该函数为operators生成不同的命名空间。该函数只用于调试和可视化,不建议用在其它方面。 - - -参数: - - **prefix** (str,可选) - 名称前缀。默认值为None。 - -**示例代码** - -.. code-block:: python - - import paddle.fluid as fluid - with fluid.name_scope("s1"): - a = fluid.data(name='data', shape=[None, 1], dtype='int32') - b = a + 1 - with fluid.name_scope("s2"): - c = b * 1 - with fluid.name_scope("s3"): - d = c / 1 - with fluid.name_scope("s1"): - f = fluid.layers.pow(d, 2.0) - with fluid.name_scope("s4"): - g = f - 1 - - # 没有指定的话默认OP在default main program中。 - for op in fluid.default_main_program().block(0).ops: - # elementwise_add在/s1/中创建 - if op.type == 'elementwise_add': - assert op.desc.attr("op_namescope") == '/s1/' - # elementwise_mul在/s1/s2中创建 - elif op.type == 'elementwise_mul': - assert op.desc.attr("op_namescope") == '/s1/s2/' - # elementwise_div在/s1/s3中创建 - elif op.type == 'elementwise_div': - assert op.desc.attr("op_namescope") == '/s1/s3/' - # elementwise_sum在/s4/中创建 - elif op.type == 'elementwise_sub': - assert op.desc.attr("op_namescope") == '/s4/' - # pow在/s1_1/中创建 - elif op.type == 'pow': - assert op.desc.attr("op_namescope") == '/s1_1/' diff --git a/doc/paddle/api/paddle/fluid/program_guard_cn.rst b/doc/paddle/api/paddle/fluid/program_guard_cn.rst deleted file mode 100644 index d1b9e68b0..000000000 --- a/doc/paddle/api/paddle/fluid/program_guard_cn.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. _cn_api_fluid_program_guard: - -program_guard -------------------------------- - - -.. 
py:function:: paddle.fluid.program_guard(main_program, startup_program=None) - -:api_attr: 声明式编程模式(静态图) - - - -该接口应配合使用python的 ``with`` 语句来将 ``with`` block 里的算子和变量添加进指定的全局主程序(main program)和启动程序(startup program)。 - -``with`` 语句块中的fluid.layers下各接口将在新的main program(主程序)中添加operators(算子)和variables(变量)。 - -参数: - - **main_program** (Program) – “with”语句中将使用的新的main program。 - - **startup_program** (Program,可选) – “with”语句中将使用的新的startup program。若传入 ``None`` 则不改变当前的启动程序,即仍使用default_startup_program。默认值为None。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): - data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - hidden = fluid.layers.fc(input=data, size=10, act='relu') - -例如,当组的网不需要startup_program初始化各变量时,可以传入一个临时的program。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - main_program = fluid.Program() - # 如果您不需要关心startup program,传入一个临时值即可 - with fluid.program_guard(main_program, fluid.Program()): - data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') - diff --git a/doc/paddle/api/paddle/fluid/save_cn.rst b/doc/paddle/api/paddle/fluid/save_cn.rst deleted file mode 100644 index f0276316b..000000000 --- a/doc/paddle/api/paddle/fluid/save_cn.rst +++ /dev/null @@ -1,80 +0,0 @@ -.. _cn_api_fluid_dygraph_jit_save: - -save ------------------ - -.. 
py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None) - -将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, -载入后可用于预测推理或者fine-tune训练。 - -该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 - -默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, -同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 - -存储的模型能够被以下API载入使用: - - :ref:`cn_api_fluid_dygraph_jit_load` - - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) - - 其他预测库API - -参数: - - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 - - **model_path** (str) - 存储模型的目录。 - - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 - -返回:无 - -**示例代码** - -.. 
code-block:: python - - import numpy as np - import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - # 存储模型 - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) diff --git a/doc/paddle/api/paddle/fluid/scope_guard_cn.rst b/doc/paddle/api/paddle/fluid/scope_guard_cn.rst deleted file mode 100644 index e220cd8d4..000000000 --- a/doc/paddle/api/paddle/fluid/scope_guard_cn.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. _cn_api_fluid_executor_scope_guard: - -scope_guard -------------------------------- - - -.. 
py:function:: paddle.fluid.executor.scope_guard (scope) - -:api_attr: 声明式编程模式(静态图) - - - - -该接口通过 python 的 ``with`` 语句切换作用域(scope)。 -作用域记录了变量名和变量 ( :ref:`api_guide_Variable` ) 之间的映射关系,类似于编程语言中的大括号。 -如果未调用此接口,所有的变量和变量名都会被记录在默认的全局作用域中。 -当用户需要创建同名的变量时,如果不希望同名的变量映射关系被覆盖,则需要通过该接口切换作用域。 -通过 ``with`` 语句切换后,``with`` 语句块中所有创建的变量都将分配给新的作用域。 - -参数: - - **scope** (Scope) - 新的作用域。 - -返回:无 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy - - new_scope = fluid.Scope() - with fluid.scope_guard(new_scope): - fluid.global_scope().var("data").get_tensor().set(numpy.ones((1, 2)), fluid.CPUPlace()) - data = numpy.array(new_scope.find_var("data").get_tensor()) - print(data) # [[1. 1.]] diff --git a/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst b/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst new file mode 100644 index 000000000..2a3de9256 --- /dev/null +++ b/doc/paddle/api/paddle/framework/get_default_dtype_cn.rst @@ -0,0 +1,3 @@ +get +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/paddle/api/paddle/framework/load_cn.rst b/doc/paddle/api/paddle/framework/load_cn.rst deleted file mode 100644 index f00197697..000000000 --- a/doc/paddle/api/paddle/framework/load_cn.rst +++ /dev/null @@ -1,168 +0,0 @@ -.. _cn_api_fluid_dygraph_jit_load: - -load ------------------ - -.. py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) - -:api_attr: 命令式编程模式(动态图) - -将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 - -.. note:: - 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, - 在使用它进行fine-tune训练时会存在一些局限: - 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; - 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; - 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; - 4. 
原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 - -参数: - - **model_path** (str) - 存储模型的目录。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 - -返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 - -**示例代码** - -1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 - - .. code-block:: python - - import numpy as np - import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 1. 训练存储模型. - # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) - # 2. 
载入模型 & 预测 - # 载入模型 - infer_net = fluid.dygraph.jit.load(model_path) - # 预测 - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = infer_net(x) - # 3. 载入模型 & fine-tune训练 - # 载入模型 - train_net = fluid.dygraph.jit.load(model_path) - train_net.train() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # fine-tune训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = train_net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - train_net.clear_gradients() - - -2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 - - .. code-block:: python - - import numpy as np - import paddle.fluid as fluid - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - img = fluid.data(name='img', shape=[None, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') - pred = fluid.layers.fc(input=img, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=pred, label=label) - avg_loss = fluid.layers.mean(loss) - optimizer = fluid.optimizer.SGD(learning_rate=0.001) - optimizer.minimize(avg_loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - loader = fluid.io.DataLoader.from_generator( - feed_list=[img, label], capacity=5, 
iterable=True) - loader.set_batch_generator(random_batch_reader(), places=place) - # 1. 训练 & 存储预测模型 - for data in loader(): - exe.run( - fluid.default_main_program(), - feed=data, - fetch_list=[avg_loss]) - model_path = "fc.example.model" - fluid.io.save_inference_model( - model_path, ["img"], [pred], exe) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 2. 载入模型 & 预测 - fc = fluid.dygraph.jit.load(model_path) - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = fc(x) - # 3. 载入模型 & fine-tune训练 - fc = fluid.dygraph.jit.load(model_path) - fc.train() - sgd = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=fc.parameters()) - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator( - random_batch_reader(), places=place) - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = fc(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - sgd.minimize(avg_loss) diff --git a/doc/paddle/api/paddle/framework/save_cn.rst b/doc/paddle/api/paddle/framework/save_cn.rst deleted file mode 100644 index f0276316b..000000000 --- a/doc/paddle/api/paddle/framework/save_cn.rst +++ /dev/null @@ -1,80 +0,0 @@ -.. _cn_api_fluid_dygraph_jit_save: - -save ------------------ - -.. 
py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None) - -将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, -载入后可用于预测推理或者fine-tune训练。 - -该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 - -默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, -同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 - -存储的模型能够被以下API载入使用: - - :ref:`cn_api_fluid_dygraph_jit_load` - - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) - - 其他预测库API - -参数: - - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 - - **model_path** (str) - 存储模型的目录。 - - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 - -返回:无 - -**示例代码** - -.. 
code-block:: python - - import numpy as np - import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - # 存储模型 - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) diff --git a/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst b/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst new file mode 100644 index 000000000..3d7928489 --- /dev/null +++ b/doc/paddle/api/paddle/framework/set_default_dtype_cn.rst @@ -0,0 +1,3 @@ +set +------------------------------- +**版本升级,文档正在开发中** diff --git a/doc/paddle/api/paddle/fluid/layers/gather_nd_cn.rst b/doc/paddle/api/paddle/gather_nd_cn.rst similarity 
index 100% rename from doc/paddle/api/paddle/fluid/layers/gather_nd_cn.rst rename to doc/paddle/api/paddle/gather_nd_cn.rst diff --git a/doc/paddle/api/paddle/framework/grad_cn.rst b/doc/paddle/api/paddle/grad_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/grad_cn.rst rename to doc/paddle/api/paddle/grad_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/has_inf_cn.rst b/doc/paddle/api/paddle/has_inf_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/has_inf_cn.rst rename to doc/paddle/api/paddle/has_inf_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/has_nan_cn.rst b/doc/paddle/api/paddle/has_nan_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/has_nan_cn.rst rename to doc/paddle/api/paddle/has_nan_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/increment_cn.rst b/doc/paddle/api/paddle/increment_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/increment_cn.rst rename to doc/paddle/api/paddle/increment_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/is_empty_cn.rst b/doc/paddle/api/paddle/is_empty_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/is_empty_cn.rst rename to doc/paddle/api/paddle/is_empty_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/isfinite_cn.rst b/doc/paddle/api/paddle/isfinite_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/isfinite_cn.rst rename to doc/paddle/api/paddle/isfinite_cn.rst diff --git a/doc/paddle/api/paddle/jit/load_cn.rst b/doc/paddle/api/paddle/jit/load_cn.rst index f00197697..a12a65fbe 100644 --- a/doc/paddle/api/paddle/jit/load_cn.rst +++ b/doc/paddle/api/paddle/jit/load_cn.rst @@ -1,168 +1,55 @@ -.. _cn_api_fluid_dygraph_jit_load: +.. _cn_api_fluid_load: load ------------------ - -.. 
py:function:: paddle.fluid.dygraph.jit.load(model_path, configs=None) - -:api_attr: 命令式编程模式(动态图) - -将接口 :ref:`cn_api_fluid_dygraph_jit_save` 或者 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型载入为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` ,用于预测推理或者fine-tune训练。 - -.. note:: - 由于一些历史原因,如果载入的模型是通过 :ref:`cn_api_fluid_io_save_inference_model` 存储的, - 在使用它进行fine-tune训练时会存在一些局限: - 1. 命令式编程模式不支持 ``LoDTensor`` ,所有原先输入变量或者参数依赖于LoD信息的模型暂时无法使用; - 2. 所有存储模型的feed变量都需要被传入 ``Translatedlayer`` 的forward方法; - 3. 原模型变量的 ``stop_gradient`` 信息已丢失且无法准确恢复; - 4. 原模型参数的 ``trainable`` 信息已丢失且无法准确恢复。 - -参数: - - **model_path** (str) - 存储模型的目录。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 - -返回:TranslatedLayer - 一个能够执行存储模型的 ``Layer`` 对象。 - -**示例代码** - -1. 载入由接口 :ref:`cn_api_fluid_dygraph_jit_save` 存储的模型进行预测推理及fine-tune训练。 - - .. code-block:: python - - import numpy as np - import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 1. 训练存储模型. 
- # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) - # 2. 载入模型 & 预测 - # 载入模型 - infer_net = fluid.dygraph.jit.load(model_path) - # 预测 - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = infer_net(x) - # 3. 载入模型 & fine-tune训练 - # 载入模型 - train_net = fluid.dygraph.jit.load(model_path) - train_net.train() - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=train_net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # fine-tune训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = train_net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - train_net.clear_gradients() - - -2. 载入由接口 :ref:`cn_api_fluid_io_save_inference_model` 存储的模型进行预测推理及fine-tune训练。 - - .. 
code-block:: python - - import numpy as np - import paddle.fluid as fluid - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - img = fluid.data(name='img', shape=[None, 784], dtype='float32') - label = fluid.data(name='label', shape=[None, 1], dtype='int64') - pred = fluid.layers.fc(input=img, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=pred, label=label) - avg_loss = fluid.layers.mean(loss) - optimizer = fluid.optimizer.SGD(learning_rate=0.001) - optimizer.minimize(avg_loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - loader = fluid.io.DataLoader.from_generator( - feed_list=[img, label], capacity=5, iterable=True) - loader.set_batch_generator(random_batch_reader(), places=place) - # 1. 训练 & 存储预测模型 - for data in loader(): - exe.run( - fluid.default_main_program(), - feed=data, - fetch_list=[avg_loss]) - model_path = "fc.example.model" - fluid.io.save_inference_model( - model_path, ["img"], [pred], exe) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 2. 载入模型 & 预测 - fc = fluid.dygraph.jit.load(model_path) - x = fluid.dygraph.to_variable(np.random.random((1, 784)).astype('float32')) - pred = fc(x) - # 3. 
载入模型 & fine-tune训练 - fc = fluid.dygraph.jit.load(model_path) - fc.train() - sgd = fluid.optimizer.SGD(learning_rate=0.001, - parameter_list=fc.parameters()) - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator( - random_batch_reader(), places=place) - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = fc(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - sgd.minimize(avg_loss) +------------------------------- + +.. py:function:: paddle.fluid.load(program, model_path, executor=None, var_list=None) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。 + +如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 + +该函数还可以加载用[save_params,save_persistables,save_vars]接口保存的模型文件。 +当[save_params,save_persistables,save_vars]保存的模型格式为单个大文件时,var_list不能为None。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要加载的Program。 + - **model_path** (str) – 保存Program的目录名称+文件前缀。格式为 ``目录名称/文件前缀`` 。 + - **executor** (Executor, 可选) - 当startup program没有运行时,用于初始化参数的Executor。默认值:None。 + - **var_list** (list, 可选) - 指定加载的变量列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,变量列表可以是所有加载文件中变量的子集;当加载的单个大文件时,变量列表必须和加载文件中的变量保持一致。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + # example1 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path") + + # example2 + # 注意example1和example2应该分开执行,避免干扰。 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path", exe) + diff --git a/doc/paddle/api/paddle/jit/save_cn.rst b/doc/paddle/api/paddle/jit/save_cn.rst index f0276316b..a15b23f2b 100644 --- a/doc/paddle/api/paddle/jit/save_cn.rst +++ b/doc/paddle/api/paddle/jit/save_cn.rst @@ -1,80 +1,51 @@ -.. _cn_api_fluid_dygraph_jit_save: +.. _cn_api_fluid_save: save ------------------ +------------------------------- -.. py:function:: paddle.fluid.dygraph.jit.save(layer, model_path, input_spec=None, configs=None) -将输入的经过 ``@declarative`` 装饰的 :ref:`cn_api_fluid_dygraph_Layer` 存储为 :ref:`cn_api_fluid_dygraph_TranslatedLayer` 格式的模型, -载入后可用于预测推理或者fine-tune训练。 +.. 
py:function:: paddle.fluid.save(program, model_path) -该接口将会将输入 :ref:`cn_api_fluid_dygraph_Layer` 转写后的模型结构 ``Program`` 和所有必要的持久参数变量存储至输入路径 ``model_path`` 中。 +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.save +:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save +:old_api: paddle.fluid.save -默认存储的 ``Program`` 文件名为 ``__model__``, 默认存储持久参数变量的文件名为 ``__variables__``, -同时会将变量的一些描述信息存储至文件 ``__variables.info__``,这些额外的信息将在fine-tune训练中使用。 -存储的模型能够被以下API载入使用: - - :ref:`cn_api_fluid_dygraph_jit_load` - - :ref:`cn_api_fluid_io_load_inference_model` (需要配置参数 ``params_filename='__variables__'`` ) - - 其他预测库API -参数: - - **layer** (Layer) - 需要存储的 :ref:`cn_api_fluid_dygraph_Layer` 对象。输入的 ``Layer`` 需要经过 ``@declarative`` 装饰。 - - **model_path** (str) - 存储模型的目录。 - - **input_spec** (list[Variable], 可选) - 描述存储模型的输入。此参数是传入当前存储的 ``TranslatedLayer`` forward方法的一个示例输入。如果为 ``None`` ,所有原 ``Layer`` forward方法的输入变量将都会被配置为存储模型的输入变量。默认为 ``None``。 - - **configs** (SaveLoadConfig, 可选) - 用于指定额外配置选项的 :ref:`cn_api_fluid_dygraph_jit_SaveLoadConfig` 对象。默认为 ``None``。 +该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。 -返回:无 +参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。 -**示例代码** +优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。 +所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。 + +网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 + - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + +返回: 无 + +**代码示例** .. 
code-block:: python - import numpy as np import paddle.fluid as fluid - from paddle.fluid.dygraph import Linear - from paddle.fluid.dygraph import declarative - BATCH_SIZE = 32 - BATCH_NUM = 20 - def random_batch_reader(): - def _get_random_images_and_labels(image_shape, label_shape): - image = np.random.random(size=image_shape).astype('float32') - label = np.random.random(size=label_shape).astype('int64') - return image, label - def __reader__(): - for _ in range(BATCH_NUM): - batch_image, batch_label = _get_random_images_and_labels( - [BATCH_SIZE, 784], [BATCH_SIZE, 1]) - yield batch_image, batch_label - return __reader__ - class LinearNet(fluid.dygraph.Layer): - def __init__(self, in_size, out_size): - super(LinearNet, self).__init__() - self._linear = Linear(in_size, out_size) - @declarative - def forward(self, x): - return self._linear(x) - # 开启命令式编程模式 - fluid.enable_dygraph() - # 创建网络 - net = LinearNet(784, 1) - adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters()) - # 创建DataLoader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) - train_loader.set_batch_generator(random_batch_reader()) - # 训练 - for data in train_loader(): - img, label = data - label.stop_gradient = True - cost = net(img) - loss = fluid.layers.cross_entropy(cost, label) - avg_loss = fluid.layers.mean(loss) - avg_loss.backward() - adam.minimize(avg_loss) - net.clear_gradients() - # 存储模型 - model_path = "linear.example.model" - fluid.dygraph.jit.save( - layer=net, - model_path=model_path, - input_spec=[img]) + + x = fluid.data(name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + fluid.save(fluid.default_main_program(), "./test_path") + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/linspace_cn.rst b/doc/paddle/api/paddle/linspace_cn.rst similarity index 55% rename from 
doc/paddle/api/paddle/fluid/layers/linspace_cn.rst rename to doc/paddle/api/paddle/linspace_cn.rst index 7c228c413..ca9775016 100644 --- a/doc/paddle/api/paddle/fluid/layers/linspace_cn.rst +++ b/doc/paddle/api/paddle/linspace_cn.rst @@ -1,29 +1,24 @@ -.. _cn_api_tensor_linspace: +.. _cn_api_fluid_layers_linspace: linspace ------------------------------- -.. py:function:: paddle.linspace(start, stop, num, dtype=None, name=None) +.. py:function:: paddle.fluid.layers.linspace(start, stop, num, dtype=None, name=None) -:alias_main: paddle.linspace -:alias: paddle.tensor.linspace, paddle.tensor.creation.linspace 该OP返回一个Tensor,Tensor的值为在区间start和stop上均匀间隔的num个值,输出Tensor的长度为num。 - **注意:该OP不进行梯度计算** 参数: - **start** (float|Tensor) – ``start`` 是区间开始的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。 - **stop** (float|Tensor) – ``end`` 是区间结束的变量,可以是一个浮点标量,或是一个shape为[1]的Tensor,该Tensor的数据类型可以是float32或者是float64。 - **num** (int|Tensor) – ``num`` 是给定区间内需要划分的区间数,可以是一个整型标量,或是一个shape为[1]的Tensor,该Tensor的数据类型需为int32。 - - **dtype** (np.dtype|core.VarDesc.VarType|str,可选) – 输出Tensor的数据类型,可以是float32或者是float64。如果dtype为None,默认类型为float32。 - - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回:输出结果的数据类型是float32或float64,表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在mum为1的情况下,仅返回包含start元素值的Tensor。 + - **dtype** (string, 可选) – 输出Tensor的数据类型,可以是float32或者是float64,如果dtype的数据类型为None,输出Tensor数据类型为float32。 + - **name** (str, 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 -返回类型:Variable +返回:表示等间隔划分结果的1-D Tensor,该Tensor的shape大小为 :math:`[num]` ,在num为1的情况下,仅返回包含start元素值的Tensor。 抛出异常: - ``TypeError`` - 当start或者stop的数据类型不是float32或者float64。 @@ -34,7 +29,11 @@ linspace .. 
code-block:: python - import paddle - data = paddle.linspace(0, 10, 5, dtype='float32') # [0.0, 2.5, 5.0, 7.5, 10.0] - data = paddle.linspace(0, 10, 1, dtype='float32') # [0.0] + import paddle.fluid as fluid + data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0] + data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0] + + + + diff --git a/doc/paddle/api/paddle/load_cn.rst b/doc/paddle/api/paddle/load_cn.rst new file mode 100644 index 000000000..a12a65fbe --- /dev/null +++ b/doc/paddle/api/paddle/load_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_load: + +load +------------------------------- + +.. py:function:: paddle.fluid.load(program, model_path, executor=None, var_list=None) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。 + +如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 + +该函数还可以加载用[save_params,save_persistables,save_vars]接口保存的模型文件。 +当[save_params,save_persistables,save_vars]保存的模型格式为单个大文件时,var_list不能为None。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要加载的Program。 + - **model_path** (str) – 保存Program的目录名称+文件前缀。格式为 ``目录名称/文件前缀`` 。 + - **executor** (Executor, 可选) - 当startup program没有运行时,用于初始化参数的Executor。默认值:None。 + - **var_list** (list, 可选) - 指定加载的变量列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,变量列表可以是所有加载文件中变量的子集;当加载的单个大文件时,变量列表必须和加载文件中的变量保持一致。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + # example1 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path") + + # example2 + # 注意example1和example2应该分开执行,避免干扰。 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path", exe) + diff --git a/doc/paddle/api/paddle/fluid/layers/log_cn.rst b/doc/paddle/api/paddle/log_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/log_cn.rst rename to doc/paddle/api/paddle/log_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/logical_and_cn.rst b/doc/paddle/api/paddle/logical_and_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/logical_and_cn.rst rename to doc/paddle/api/paddle/logical_and_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/logical_not_cn.rst b/doc/paddle/api/paddle/logical_not_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/logical_not_cn.rst rename to doc/paddle/api/paddle/logical_not_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/logical_or_cn.rst b/doc/paddle/api/paddle/logical_or_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/logical_or_cn.rst rename to doc/paddle/api/paddle/logical_or_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/logical_xor_cn.rst b/doc/paddle/api/paddle/logical_xor_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/logical_xor_cn.rst rename to 
doc/paddle/api/paddle/logical_xor_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/multiplex_cn.rst b/doc/paddle/api/paddle/multiplex_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/multiplex_cn.rst rename to doc/paddle/api/paddle/multiplex_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/BatchNorm_cn.rst b/doc/paddle/api/paddle/nn/BatchNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/BatchNorm_cn.rst rename to doc/paddle/api/paddle/nn/BatchNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/BilinearTensorProduct_cn.rst b/doc/paddle/api/paddle/nn/BilinearTensorProduct_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/BilinearTensorProduct_cn.rst rename to doc/paddle/api/paddle/nn/BilinearTensorProduct_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/Embedding_cn.rst b/doc/paddle/api/paddle/nn/Embedding_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/Embedding_cn.rst rename to doc/paddle/api/paddle/nn/Embedding_cn.rst diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByGlobalNorm_cn.rst b/doc/paddle/api/paddle/nn/GradientClipByGlobalNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/clip/GradientClipByGlobalNorm_cn.rst rename to doc/paddle/api/paddle/nn/GradientClipByGlobalNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByNorm_cn.rst b/doc/paddle/api/paddle/nn/GradientClipByNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/clip/GradientClipByNorm_cn.rst rename to doc/paddle/api/paddle/nn/GradientClipByNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/clip/GradientClipByValue_cn.rst b/doc/paddle/api/paddle/nn/GradientClipByValue_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/clip/GradientClipByValue_cn.rst rename to doc/paddle/api/paddle/nn/GradientClipByValue_cn.rst diff --git 
a/doc/paddle/api/paddle/fluid/dygraph/GroupNorm_cn.rst b/doc/paddle/api/paddle/nn/GroupNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/GroupNorm_cn.rst rename to doc/paddle/api/paddle/nn/GroupNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/LayerList_cn.rst b/doc/paddle/api/paddle/nn/LayerList_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/container/LayerList_cn.rst rename to doc/paddle/api/paddle/nn/LayerList_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/LayerNorm_cn.rst b/doc/paddle/api/paddle/nn/LayerNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/LayerNorm_cn.rst rename to doc/paddle/api/paddle/nn/LayerNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/layers/Layer_cn.rst b/doc/paddle/api/paddle/nn/Layer_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/layers/Layer_cn.rst rename to doc/paddle/api/paddle/nn/Layer_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/Linear_cn.rst b/doc/paddle/api/paddle/nn/Linear_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/Linear_cn.rst rename to doc/paddle/api/paddle/nn/Linear_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/ParameterList_cn.rst b/doc/paddle/api/paddle/nn/ParameterList_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/container/ParameterList_cn.rst rename to doc/paddle/api/paddle/nn/ParameterList_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/Pool2D_cn.rst b/doc/paddle/api/paddle/nn/Pool2D_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/Pool2D_cn.rst rename to doc/paddle/api/paddle/nn/Pool2D_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/container/Sequential_cn.rst b/doc/paddle/api/paddle/nn/Sequential_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/container/Sequential_cn.rst rename to 
doc/paddle/api/paddle/nn/Sequential_cn.rst diff --git a/doc/paddle/api/paddle/fluid/dygraph/SpectralNorm_cn.rst b/doc/paddle/api/paddle/nn/SpectralNorm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/dygraph/SpectralNorm_cn.rst rename to doc/paddle/api/paddle/nn/SpectralNorm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/beam_search_cn.rst b/doc/paddle/api/paddle/nn/beam_search_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/beam_search_cn.rst rename to doc/paddle/api/paddle/nn/beam_search_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/beam_search_decode_cn.rst b/doc/paddle/api/paddle/nn/beam_search_decode_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/beam_search_decode_cn.rst rename to doc/paddle/api/paddle/nn/beam_search_decode_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/case_cn.rst b/doc/paddle/api/paddle/nn/case_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/case_cn.rst rename to doc/paddle/api/paddle/nn/case_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/clip_by_norm_cn.rst b/doc/paddle/api/paddle/nn/clip_by_norm_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/clip_by_norm_cn.rst rename to doc/paddle/api/paddle/nn/clip_by_norm_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/clip_cn.rst b/doc/paddle/api/paddle/nn/clip_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/clip_cn.rst rename to doc/paddle/api/paddle/nn/clip_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/cond_cn.rst b/doc/paddle/api/paddle/nn/cond_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/cond_cn.rst rename to doc/paddle/api/paddle/nn/cond_cn.rst diff --git a/doc/paddle/api/paddle/fluid/data_cn.rst b/doc/paddle/api/paddle/nn/data_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/data_cn.rst rename to doc/paddle/api/paddle/nn/data_cn.rst diff 
--git a/doc/paddle/api/paddle/fluid/layers/softmax_cn.rst b/doc/paddle/api/paddle/nn/functional/activation/softmax_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/softmax_cn.rst rename to doc/paddle/api/paddle/nn/functional/activation/softmax_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/adaptive_pool2d_cn.rst b/doc/paddle/api/paddle/nn/functional/adaptive_pool2d_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/adaptive_pool2d_cn.rst rename to doc/paddle/api/paddle/nn/functional/adaptive_pool2d_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/adaptive_pool3d_cn.rst b/doc/paddle/api/paddle/nn/functional/adaptive_pool3d_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/adaptive_pool3d_cn.rst rename to doc/paddle/api/paddle/nn/functional/adaptive_pool3d_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/add_position_encoding_cn.rst b/doc/paddle/api/paddle/nn/functional/add_position_encoding_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/add_position_encoding_cn.rst rename to doc/paddle/api/paddle/nn/functional/add_position_encoding_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/affine_channel_cn.rst b/doc/paddle/api/paddle/nn/functional/affine_channel_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/affine_channel_cn.rst rename to doc/paddle/api/paddle/nn/functional/affine_channel_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/affine_grid_cn.rst b/doc/paddle/api/paddle/nn/functional/affine_grid_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/affine_grid_cn.rst rename to doc/paddle/api/paddle/nn/functional/affine_grid_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/anchor_generator_cn.rst b/doc/paddle/api/paddle/nn/functional/anchor_generator_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/anchor_generator_cn.rst rename to 
doc/paddle/api/paddle/nn/functional/anchor_generator_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/assign_cn.rst b/doc/paddle/api/paddle/nn/functional/assign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/assign_cn.rst rename to doc/paddle/api/paddle/nn/functional/assign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/bipartite_match_cn.rst b/doc/paddle/api/paddle/nn/functional/bipartite_match_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/bipartite_match_cn.rst rename to doc/paddle/api/paddle/nn/functional/bipartite_match_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/box_clip_cn.rst b/doc/paddle/api/paddle/nn/functional/box_clip_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/box_clip_cn.rst rename to doc/paddle/api/paddle/nn/functional/box_clip_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/box_coder_cn.rst b/doc/paddle/api/paddle/nn/functional/box_coder_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/box_coder_cn.rst rename to doc/paddle/api/paddle/nn/functional/box_coder_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/box_decoder_and_assign_cn.rst b/doc/paddle/api/paddle/nn/functional/box_decoder_and_assign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/box_decoder_and_assign_cn.rst rename to doc/paddle/api/paddle/nn/functional/box_decoder_and_assign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/bpr_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/bpr_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/bpr_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/bpr_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/brelu_cn.rst b/doc/paddle/api/paddle/nn/functional/brelu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/brelu_cn.rst rename to doc/paddle/api/paddle/nn/functional/brelu_cn.rst diff --git 
a/doc/paddle/api/paddle/fluid/layers/center_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/center_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/center_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/center_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/collect_fpn_proposals_cn.rst b/doc/paddle/api/paddle/nn/functional/collect_fpn_proposals_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/collect_fpn_proposals_cn.rst rename to doc/paddle/api/paddle/nn/functional/collect_fpn_proposals_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/continuous_value_model_cn.rst b/doc/paddle/api/paddle/nn/functional/continuous_value_model_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/continuous_value_model_cn.rst rename to doc/paddle/api/paddle/nn/functional/continuous_value_model_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/cosine_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/cosine_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/cosine_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/cosine_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/cross_entropy_cn.rst b/doc/paddle/api/paddle/nn/functional/cross_entropy_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/cross_entropy_cn.rst rename to doc/paddle/api/paddle/nn/functional/cross_entropy_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/deformable_roi_pooling_cn.rst b/doc/paddle/api/paddle/nn/functional/deformable_roi_pooling_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/deformable_roi_pooling_cn.rst rename to doc/paddle/api/paddle/nn/functional/deformable_roi_pooling_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/density_prior_box_cn.rst b/doc/paddle/api/paddle/nn/functional/density_prior_box_cn.rst similarity index 100% rename from 
doc/paddle/api/paddle/fluid/layers/density_prior_box_cn.rst rename to doc/paddle/api/paddle/nn/functional/density_prior_box_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/detection_output_cn.rst b/doc/paddle/api/paddle/nn/functional/detection_output_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/detection_output_cn.rst rename to doc/paddle/api/paddle/nn/functional/detection_output_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/dice_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/dice_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/dice_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/dice_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/distribute_fpn_proposals_cn.rst b/doc/paddle/api/paddle/nn/functional/distribute_fpn_proposals_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/distribute_fpn_proposals_cn.rst rename to doc/paddle/api/paddle/nn/functional/distribute_fpn_proposals_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/dropout_cn.rst b/doc/paddle/api/paddle/nn/functional/dropout_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/dropout_cn.rst rename to doc/paddle/api/paddle/nn/functional/dropout_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/edit_distance_cn.rst b/doc/paddle/api/paddle/nn/functional/edit_distance_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/edit_distance_cn.rst rename to doc/paddle/api/paddle/nn/functional/edit_distance_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/elu_cn.rst b/doc/paddle/api/paddle/nn/functional/elu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/elu_cn.rst rename to doc/paddle/api/paddle/nn/functional/elu_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/exponential_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/exponential_decay_cn.rst similarity index 100% rename from 
doc/paddle/api/paddle/fluid/layers/exponential_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/exponential_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/filter_by_instag_cn.rst b/doc/paddle/api/paddle/nn/functional/filter_by_instag_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/filter_by_instag_cn.rst rename to doc/paddle/api/paddle/nn/functional/filter_by_instag_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/fsp_matrix_cn.rst b/doc/paddle/api/paddle/nn/functional/fsp_matrix_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/fsp_matrix_cn.rst rename to doc/paddle/api/paddle/nn/functional/fsp_matrix_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/gelu_cn.rst b/doc/paddle/api/paddle/nn/functional/gelu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/gelu_cn.rst rename to doc/paddle/api/paddle/nn/functional/gelu_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/generate_mask_labels_cn.rst b/doc/paddle/api/paddle/nn/functional/generate_mask_labels_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/generate_mask_labels_cn.rst rename to doc/paddle/api/paddle/nn/functional/generate_mask_labels_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/generate_proposal_labels_cn.rst b/doc/paddle/api/paddle/nn/functional/generate_proposal_labels_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/generate_proposal_labels_cn.rst rename to doc/paddle/api/paddle/nn/functional/generate_proposal_labels_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/generate_proposals_cn.rst b/doc/paddle/api/paddle/nn/functional/generate_proposals_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/generate_proposals_cn.rst rename to doc/paddle/api/paddle/nn/functional/generate_proposals_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/grid_sampler_cn.rst 
b/doc/paddle/api/paddle/nn/functional/grid_sampler_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/grid_sampler_cn.rst rename to doc/paddle/api/paddle/nn/functional/grid_sampler_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/hard_sigmoid_cn.rst b/doc/paddle/api/paddle/nn/functional/hard_sigmoid_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/hard_sigmoid_cn.rst rename to doc/paddle/api/paddle/nn/functional/hard_sigmoid_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/hard_swish_cn.rst b/doc/paddle/api/paddle/nn/functional/hard_swish_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/hard_swish_cn.rst rename to doc/paddle/api/paddle/nn/functional/hard_swish_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/hash_cn.rst b/doc/paddle/api/paddle/nn/functional/hash_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/hash_cn.rst rename to doc/paddle/api/paddle/nn/functional/hash_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/huber_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/huber_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/huber_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/huber_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/image_resize_cn.rst b/doc/paddle/api/paddle/nn/functional/image_resize_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/image_resize_cn.rst rename to doc/paddle/api/paddle/nn/functional/image_resize_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/image_resize_short_cn.rst b/doc/paddle/api/paddle/nn/functional/image_resize_short_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/image_resize_short_cn.rst rename to doc/paddle/api/paddle/nn/functional/image_resize_short_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/inverse_time_decay_cn.rst 
b/doc/paddle/api/paddle/nn/functional/inverse_time_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/inverse_time_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/inverse_time_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/iou_similarity_cn.rst b/doc/paddle/api/paddle/nn/functional/iou_similarity_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/iou_similarity_cn.rst rename to doc/paddle/api/paddle/nn/functional/iou_similarity_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/kldiv_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/kldiv_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/kldiv_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/kldiv_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/l2_normalize_cn.rst b/doc/paddle/api/paddle/nn/functional/l2_normalize_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/l2_normalize_cn.rst rename to doc/paddle/api/paddle/nn/functional/l2_normalize_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/label_smooth_cn.rst b/doc/paddle/api/paddle/nn/functional/label_smooth_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/label_smooth_cn.rst rename to doc/paddle/api/paddle/nn/functional/label_smooth_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/leaky_relu_cn.rst b/doc/paddle/api/paddle/nn/functional/leaky_relu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/leaky_relu_cn.rst rename to doc/paddle/api/paddle/nn/functional/leaky_relu_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/linear_lr_warmup_cn.rst b/doc/paddle/api/paddle/nn/functional/linear_lr_warmup_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/linear_lr_warmup_cn.rst rename to doc/paddle/api/paddle/nn/functional/linear_lr_warmup_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/log_loss_cn.rst 
b/doc/paddle/api/paddle/nn/functional/log_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/log_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/log_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/logsigmoid_cn.rst b/doc/paddle/api/paddle/nn/functional/logsigmoid_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/logsigmoid_cn.rst rename to doc/paddle/api/paddle/nn/functional/logsigmoid_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/mse_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/mse_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/loss/mse_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/lrn_cn.rst b/doc/paddle/api/paddle/nn/functional/lrn_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/lrn_cn.rst rename to doc/paddle/api/paddle/nn/functional/lrn_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/maxout_cn.rst b/doc/paddle/api/paddle/nn/functional/maxout_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/maxout_cn.rst rename to doc/paddle/api/paddle/nn/functional/maxout_cn.rst diff --git a/doc/paddle/api/paddle/nn/functional/mse_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/mse_loss_cn.rst new file mode 100644 index 000000000..59678570f --- /dev/null +++ b/doc/paddle/api/paddle/nn/functional/mse_loss_cn.rst @@ -0,0 +1,40 @@ +.. _cn_api_fluid_layers_mse_loss: + +mse_loss +------------------------------- + +.. py:function:: paddle.fluid.layers.mse_loss(input,label) + +:alias_main: paddle.nn.functional.mse_loss +:alias: paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss +:old_api: paddle.fluid.layers.mse_loss + + + +该OP用于计算预测值和目标值的均方差误差。 + +对于预测值input和目标值label,公式为: + +.. 
math:: + + Out = MEAN((input-label)^{2}) + +参数: + - **input** (Variable) - 预测值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + - **label** (Variable) - 目标值,维度为 :math:`[N_1, N_2, ..., N_k, D]` 的多维Tensor,其中最后一维D是类别数目。数据类型为float32或float64。 + +返回:预测值和目标值的均方差 + +返回类型:变量(Variable) + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + y = fluid.data(name='y', shape=[1], dtype='float32') + y_predict = fluid.data(name='y_predict', shape=[1], dtype='float32') + cost = fluid.layers.mse_loss(input=y_predict, label=y) + + + diff --git a/doc/paddle/api/paddle/fluid/layers/multiclass_nms_cn.rst b/doc/paddle/api/paddle/nn/functional/multiclass_nms_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/multiclass_nms_cn.rst rename to doc/paddle/api/paddle/nn/functional/multiclass_nms_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/natural_exp_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/natural_exp_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/natural_exp_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/natural_exp_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/noam_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/noam_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/noam_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/noam_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/npair_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/npair_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/npair_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/npair_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/one_hot_cn.rst b/doc/paddle/api/paddle/nn/functional/one_hot_cn.rst similarity index 65% rename from doc/paddle/api/paddle/fluid/one_hot_cn.rst rename to doc/paddle/api/paddle/nn/functional/one_hot_cn.rst index 2aaaf117e..fbf08df21 100644 --- 
a/doc/paddle/api/paddle/fluid/one_hot_cn.rst +++ b/doc/paddle/api/paddle/nn/functional/one_hot_cn.rst @@ -1,26 +1,27 @@ -.. _cn_api_fluid_layers_one_hot: +.. _cn_api_fluid_one_hot: one_hot ------------------------------- -.. py:function:: paddle.fluid.layers.one_hot(input, depth, allow_out_of_range=False) +.. py:function:: paddle.fluid.one_hot(input, depth, allow_out_of_range=False) +:alias_main: paddle.nn.functional.one_hot +:alias: paddle.nn.functional.one_hot,paddle.nn.functional.common.one_hot +:old_api: paddle.fluid.one_hot -**注意:此OP要求输入Tensor shape的最后一维必须为1。此OP将在未来的版本中被移除!推荐使用fluid.** :ref:`cn_api_fluid_one_hot` 。 - 该OP将输入(input)中的每个id转换为一个one-hot向量,其长度为 ``depth`` ,该id对应的向量维度上的值为1,其余维度的值为0。 -输出的Tensor(或LoDTensor)的shape是将输入shape的最后一维替换为depth的维度。 +输出的Tensor(或LoDTensor)的shape是在输入shape的最后一维后面添加了depth的维度。 - 示例1(allow_out_of_range=False): .. code-block:: python 输入: - X.shape = [4, 1] - X.data = [[1], [1], [3], [0]] + X.shape = [4] + X.data = [1, 1, 3, 0] depth = 4 输出: @@ -35,8 +36,8 @@ one_hot .. code-block:: python 输入: - X.shape = [4, 1] - X.data = [[1], [1], [5], [0]] + X.shape = [4] + X.data = [1, 1, 5, 0] depth = 4 allow_out_of_range=True @@ -52,8 +53,8 @@ one_hot .. code-block:: python 输入: - X.shape = [4, 1] - X.data = [[1], [1], [5], [0]] + X.shape = [4] + X.data = [1, 1, 5, 0] depth = 4 allow_out_of_range=False @@ -62,7 +63,7 @@ one_hot 参数: - - **input** (Variable) - 维度为 :math:`[N_1, ..., N_n, 1]` 的多维Tensor或LoDTensor,维度至少两维,且最后一维必须是1。数据类型为int32或int64。 + - **input** (Variable) - 维度为 :math:`[N_1, ..., N_n]` 的多维Tensor或LoDTensor,维度至少1维。数据类型为int32或int64。 - **depth** (int) - 用于定义一个one-hot向量的长度。若输入为词id,则 ``depth`` 通常取值为词典大小。 - **allow_out_of_range** (bool) - 指明input中所包含的id值是否可以大于depth值。当超过depth时,如果 `allow_out_of_range` 为False,则会抛出 `Illegal value` 的异常;如果设置为True,该id对应的向量为0向量。默认值为False。 @@ -75,6 +76,6 @@ one_hot .. 
code-block:: python import paddle.fluid as fluid - # 该代码对应上述第一个示例,其中输入label的shape是[4, 1],输出one_hot_label的shape是[4, 4] - label = fluid.layers.data(name="label", shape=[4, 1], append_batch_size=False, dtype="int64") - one_hot_label = fluid.layers.one_hot(input=label, depth=4) + # 该代码对应上述第一个示例,其中输入label的shape是[4],输出one_hot_label的shape是[4, 4] + label = fluid.layers.data(name="label", shape=[4], append_batch_size=False, dtype="int64") + one_hot_label = fluid.one_hot(input=label, depth=4) diff --git a/doc/paddle/api/paddle/fluid/layers/pad2d_cn.rst b/doc/paddle/api/paddle/nn/functional/pad2d_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pad2d_cn.rst rename to doc/paddle/api/paddle/nn/functional/pad2d_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/pad_cn.rst b/doc/paddle/api/paddle/nn/functional/pad_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pad_cn.rst rename to doc/paddle/api/paddle/nn/functional/pad_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/pad_constant_like_cn.rst b/doc/paddle/api/paddle/nn/functional/pad_constant_like_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pad_constant_like_cn.rst rename to doc/paddle/api/paddle/nn/functional/pad_constant_like_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/piecewise_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/piecewise_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/piecewise_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/piecewise_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/pixel_shuffle_cn.rst b/doc/paddle/api/paddle/nn/functional/pixel_shuffle_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pixel_shuffle_cn.rst rename to doc/paddle/api/paddle/nn/functional/pixel_shuffle_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/polygon_box_transform_cn.rst 
b/doc/paddle/api/paddle/nn/functional/polygon_box_transform_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/polygon_box_transform_cn.rst rename to doc/paddle/api/paddle/nn/functional/polygon_box_transform_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/polynomial_decay_cn.rst b/doc/paddle/api/paddle/nn/functional/polynomial_decay_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/polynomial_decay_cn.rst rename to doc/paddle/api/paddle/nn/functional/polynomial_decay_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/pool2d_cn.rst b/doc/paddle/api/paddle/nn/functional/pool2d_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pool2d_cn.rst rename to doc/paddle/api/paddle/nn/functional/pool2d_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/pool3d_cn.rst b/doc/paddle/api/paddle/nn/functional/pool3d_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/pool3d_cn.rst rename to doc/paddle/api/paddle/nn/functional/pool3d_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/prior_box_cn.rst b/doc/paddle/api/paddle/nn/functional/prior_box_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/prior_box_cn.rst rename to doc/paddle/api/paddle/nn/functional/prior_box_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/prroi_pool_cn.rst b/doc/paddle/api/paddle/nn/functional/prroi_pool_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/prroi_pool_cn.rst rename to doc/paddle/api/paddle/nn/functional/prroi_pool_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/psroi_pool_cn.rst b/doc/paddle/api/paddle/nn/functional/psroi_pool_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/psroi_pool_cn.rst rename to doc/paddle/api/paddle/nn/functional/psroi_pool_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/random_crop_cn.rst b/doc/paddle/api/paddle/nn/functional/random_crop_cn.rst 
similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/random_crop_cn.rst rename to doc/paddle/api/paddle/nn/functional/random_crop_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/rank_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/rank_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/rank_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/rank_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/relu6_cn.rst b/doc/paddle/api/paddle/nn/functional/relu6_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/relu6_cn.rst rename to doc/paddle/api/paddle/nn/functional/relu6_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/resize_bilinear_cn.rst b/doc/paddle/api/paddle/nn/functional/resize_bilinear_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/resize_bilinear_cn.rst rename to doc/paddle/api/paddle/nn/functional/resize_bilinear_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/resize_nearest_cn.rst b/doc/paddle/api/paddle/nn/functional/resize_nearest_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/resize_nearest_cn.rst rename to doc/paddle/api/paddle/nn/functional/resize_nearest_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/resize_trilinear_cn.rst b/doc/paddle/api/paddle/nn/functional/resize_trilinear_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/resize_trilinear_cn.rst rename to doc/paddle/api/paddle/nn/functional/resize_trilinear_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/retinanet_detection_output_cn.rst b/doc/paddle/api/paddle/nn/functional/retinanet_detection_output_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/retinanet_detection_output_cn.rst rename to doc/paddle/api/paddle/nn/functional/retinanet_detection_output_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/retinanet_target_assign_cn.rst 
b/doc/paddle/api/paddle/nn/functional/retinanet_target_assign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/retinanet_target_assign_cn.rst rename to doc/paddle/api/paddle/nn/functional/retinanet_target_assign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/roi_align_cn.rst b/doc/paddle/api/paddle/nn/functional/roi_align_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/roi_align_cn.rst rename to doc/paddle/api/paddle/nn/functional/roi_align_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/roi_perspective_transform_cn.rst b/doc/paddle/api/paddle/nn/functional/roi_perspective_transform_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/roi_perspective_transform_cn.rst rename to doc/paddle/api/paddle/nn/functional/roi_perspective_transform_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/roi_pool_cn.rst b/doc/paddle/api/paddle/nn/functional/roi_pool_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/roi_pool_cn.rst rename to doc/paddle/api/paddle/nn/functional/roi_pool_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/rpn_target_assign_cn.rst b/doc/paddle/api/paddle/nn/functional/rpn_target_assign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/rpn_target_assign_cn.rst rename to doc/paddle/api/paddle/nn/functional/rpn_target_assign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/sampled_softmax_with_cross_entropy_cn.rst b/doc/paddle/api/paddle/nn/functional/sampled_softmax_with_cross_entropy_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/sampled_softmax_with_cross_entropy_cn.rst rename to doc/paddle/api/paddle/nn/functional/sampled_softmax_with_cross_entropy_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/selu_cn.rst b/doc/paddle/api/paddle/nn/functional/selu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/selu_cn.rst rename to 
doc/paddle/api/paddle/nn/functional/selu_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/shuffle_channel_cn.rst b/doc/paddle/api/paddle/nn/functional/shuffle_channel_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/shuffle_channel_cn.rst rename to doc/paddle/api/paddle/nn/functional/shuffle_channel_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/sigmoid_cross_entropy_with_logits_cn.rst b/doc/paddle/api/paddle/nn/functional/sigmoid_cross_entropy_with_logits_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/sigmoid_cross_entropy_with_logits_cn.rst rename to doc/paddle/api/paddle/nn/functional/sigmoid_cross_entropy_with_logits_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/sigmoid_focal_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/sigmoid_focal_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/sigmoid_focal_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/similarity_focus_cn.rst b/doc/paddle/api/paddle/nn/functional/similarity_focus_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/similarity_focus_cn.rst rename to doc/paddle/api/paddle/nn/functional/similarity_focus_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/smooth_l1_cn.rst b/doc/paddle/api/paddle/nn/functional/smooth_l1_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/smooth_l1_cn.rst rename to doc/paddle/api/paddle/nn/functional/smooth_l1_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/soft_relu_cn.rst b/doc/paddle/api/paddle/nn/functional/soft_relu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/soft_relu_cn.rst rename to doc/paddle/api/paddle/nn/functional/soft_relu_cn.rst diff --git a/doc/paddle/api/paddle/nn/functional/softmax_cn.rst b/doc/paddle/api/paddle/nn/functional/softmax_cn.rst deleted file mode 100755 index 
a11f95507..000000000 --- a/doc/paddle/api/paddle/nn/functional/softmax_cn.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. _cn_api_fluid_layers_softmax: - -softmax -------------------------------- - -.. py:function:: paddle.fluid.layers.softmax(input, use_cudnn=False, name=None, axis=-1) - -:alias_main: paddle.nn.functional.softmax -:alias: paddle.nn.functional.softmax,paddle.nn.functional.activation.softmax -:old_api: paddle.fluid.layers.softmax - - - -该OP实现了softmax层。OP的计算过程如下: - -步骤1:输入 ``input`` 的 ``axis`` 维会被置换到最后一维; - -步骤2:将输入 ``Tensor`` 在逻辑上变换为二维矩阵。二维矩阵第一维(列长度)是输入除最后一维之外的其他维度值的乘积,第二维(行长度)和输入 ``axis`` 维的长度相同;对于矩阵的每一行,softmax操作对其进行重新缩放,使得该行的每个元素在 \[0,1\] 范围内,并且总和为1; - -步骤3:softmax操作执行完成后,执行步骤1和步骤2的逆运算,将二维矩阵恢复至和输入 ``input`` 相同的维度。 - -上述步骤2中softmax操作计算过程如下: - - - 对于二维矩阵的每一行,计算K维向量(K是输入第 ``axis`` 维的长度)中指定位置的指数值和全部位置指数值的和。 - - - 指定位置指数值与全部位置指数值之和的比值就是softmax操作的输出。 - -对于二维矩阵中的第i行和第j列有: - -.. math:: - - - Out[i,j] = \frac{exp(X[i,j])}{\sum_j exp(X[i,j])} - -- 示例1(矩阵一共有三维。axis = -1,表示沿着最后一维(即第三维)做softmax操作) - -.. code-block:: python - - 输入 - - X.shape = [2, 3, 4] - - X.data = [[[2.0, 3.0, 4.0, 5.0], - [3.0, 4.0, 5.0, 6.0], - [7.0, 8.0, 8.0, 9.0]], - [[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [6.0, 7.0, 8.0, 9.0]]] - - axis = -1 - - 输出 - - Out.shape = [2, 3, 4] - - Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - [0.07232949, 0.19661193, 0.19661193, 0.53444665]], - [[0.0320586 , 0.08714432, 0.23688282, 0.64391426], - [0.0320586 , 0.08714432, 0.23688282, 0.64391426], - [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]] - -- 示例2(矩阵一共有三维。axis = 1,表示沿着第二维做softmax操作) - -.. 
code-block:: python - - 输入 - - X.shape = [2, 3, 4] - - X.data = [[[2.0, 3.0, 4.0, 5.0], - [3.0, 4.0, 5.0, 6.0], - [7.0, 8.0, 8.0, 9.0]], - [[1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [6.0, 7.0, 8.0, 9.0]]] - - axis = 1 - - 输出 - - Out.shape = [2, 3, 4] - - Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783], - [0.01786798, 0.01786798, 0.04661262, 0.04661262], - [0.97555875, 0.97555875, 0.93623955, 0.93623955]], - [[0.00490169, 0.00490169, 0.00490169, 0.00490169], - [0.26762315, 0.26762315, 0.26762315, 0.26762315], - [0.72747516, 0.72747516, 0.72747516, 0.72747516]]] - - -参数: - - **input** (Variable) - 任意维度的多维 ``Tensor`` ,数据类型为float32或float64。 - - **use_cudnn** (bool, 可选) - 指示是否用cudnn库。当 ``use_cudnn`` 为True时,在安装GPU版本Paddle并且本机安装cudnn库的前提下,使用GPU训练或推理时才有效。默认值:False。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - - **axis** (int, 可选) - 指示进行softmax计算的维度索引,其范围应为 :math:`[-1,rank-1]` ,其中rank是输入变量的秩。默认值:-1(表示对最后一维做softmax操作)。 - -返回:表示softmax操作结果的 ``Tensor`` ,数据类型和 ``input`` 一致,返回维度和 ``input`` 一致。 - -返回类型:Variable - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - - data = fluid.layers.data(name="input", shape=[-1, 3],dtype="float32") - result = fluid.layers.softmax(data,axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - x = np.random.rand(3, 3).astype("float32") - output= exe.run(feed={"input": x}, - fetch_list=[result[0]]) - print(output) - diff --git a/doc/paddle/api/paddle/fluid/layers/softmax_with_cross_entropy_cn.rst b/doc/paddle/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/softmax_with_cross_entropy_cn.rst rename to doc/paddle/api/paddle/nn/functional/softmax_with_cross_entropy_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/softplus_cn.rst b/doc/paddle/api/paddle/nn/functional/softplus_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/softplus_cn.rst rename to doc/paddle/api/paddle/nn/functional/softplus_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/softshrink_cn.rst b/doc/paddle/api/paddle/nn/functional/softshrink_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/softshrink_cn.rst rename to doc/paddle/api/paddle/nn/functional/softshrink_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/softsign_cn.rst b/doc/paddle/api/paddle/nn/functional/softsign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/softsign_cn.rst rename to doc/paddle/api/paddle/nn/functional/softsign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/space_to_depth_cn.rst b/doc/paddle/api/paddle/nn/functional/space_to_depth_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/space_to_depth_cn.rst rename to doc/paddle/api/paddle/nn/functional/space_to_depth_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/square_error_cost_cn.rst b/doc/paddle/api/paddle/nn/functional/square_error_cost_cn.rst similarity 
index 100% rename from doc/paddle/api/paddle/fluid/layers/square_error_cost_cn.rst rename to doc/paddle/api/paddle/nn/functional/square_error_cost_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/ssd_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/ssd_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/ssd_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/ssd_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/swish_cn.rst b/doc/paddle/api/paddle/nn/functional/swish_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/swish_cn.rst rename to doc/paddle/api/paddle/nn/functional/swish_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/tanh_shrink_cn.rst b/doc/paddle/api/paddle/nn/functional/tanh_shrink_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/tanh_shrink_cn.rst rename to doc/paddle/api/paddle/nn/functional/tanh_shrink_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/target_assign_cn.rst b/doc/paddle/api/paddle/nn/functional/target_assign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/target_assign_cn.rst rename to doc/paddle/api/paddle/nn/functional/target_assign_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/teacher_student_sigmoid_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/teacher_student_sigmoid_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/teacher_student_sigmoid_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/teacher_student_sigmoid_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/temporal_shift_cn.rst b/doc/paddle/api/paddle/nn/functional/temporal_shift_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/temporal_shift_cn.rst rename to doc/paddle/api/paddle/nn/functional/temporal_shift_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/thresholded_relu_cn.rst 
b/doc/paddle/api/paddle/nn/functional/thresholded_relu_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/thresholded_relu_cn.rst rename to doc/paddle/api/paddle/nn/functional/thresholded_relu_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/unfold_cn.rst b/doc/paddle/api/paddle/nn/functional/unfold_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/unfold_cn.rst rename to doc/paddle/api/paddle/nn/functional/unfold_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/warpctc_cn.rst b/doc/paddle/api/paddle/nn/functional/warpctc_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/warpctc_cn.rst rename to doc/paddle/api/paddle/nn/functional/warpctc_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/yolo_box_cn.rst b/doc/paddle/api/paddle/nn/functional/yolo_box_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/yolo_box_cn.rst rename to doc/paddle/api/paddle/nn/functional/yolo_box_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/yolov3_loss_cn.rst b/doc/paddle/api/paddle/nn/functional/yolov3_loss_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/yolov3_loss_cn.rst rename to doc/paddle/api/paddle/nn/functional/yolov3_loss_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/gather_tree_cn.rst b/doc/paddle/api/paddle/nn/gather_tree_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/gather_tree_cn.rst rename to doc/paddle/api/paddle/nn/gather_tree_cn.rst diff --git a/doc/paddle/api/paddle/fluid/initializer/Bilinear_cn.rst b/doc/paddle/api/paddle/nn/initializer/Bilinear_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/initializer/Bilinear_cn.rst rename to doc/paddle/api/paddle/nn/initializer/Bilinear_cn.rst diff --git a/doc/paddle/api/paddle/fluid/initializer/Constant_cn.rst b/doc/paddle/api/paddle/nn/initializer/Constant_cn.rst similarity index 100% rename from 
doc/paddle/api/paddle/fluid/initializer/Constant_cn.rst rename to doc/paddle/api/paddle/nn/initializer/Constant_cn.rst diff --git a/doc/paddle/api/paddle/fluid/initializer/MSRA_cn.rst b/doc/paddle/api/paddle/nn/initializer/MSRA_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/initializer/MSRA_cn.rst rename to doc/paddle/api/paddle/nn/initializer/MSRA_cn.rst diff --git a/doc/paddle/api/paddle/nn/initializer/Normal_cn.rst b/doc/paddle/api/paddle/nn/initializer/Normal_cn.rst new file mode 100644 index 000000000..a7ae0fe06 --- /dev/null +++ b/doc/paddle/api/paddle/nn/initializer/Normal_cn.rst @@ -0,0 +1,16 @@ +.. _cn_api_fluid_initializer_Normal: + +Normal +------------------------------- + +.. py:attribute:: paddle.fluid.initializer.Normal + +:alias_main: paddle.nn.initializer.Normal +:alias: paddle.nn.initializer.Normal +:old_api: paddle.fluid.initializer.Normal + + + +``NormalInitializer`` 的别名 + + diff --git a/doc/paddle/api/paddle/fluid/initializer/TruncatedNormal_cn.rst b/doc/paddle/api/paddle/nn/initializer/TruncatedNormal_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/initializer/TruncatedNormal_cn.rst rename to doc/paddle/api/paddle/nn/initializer/TruncatedNormal_cn.rst diff --git a/doc/paddle/api/paddle/nn/initializer/Uniform_cn.rst b/doc/paddle/api/paddle/nn/initializer/Uniform_cn.rst new file mode 100644 index 000000000..48a7efeee --- /dev/null +++ b/doc/paddle/api/paddle/nn/initializer/Uniform_cn.rst @@ -0,0 +1,17 @@ +.. _cn_api_fluid_initializer_Uniform: + +Uniform +------------------------------- + +.. 
py:attribute:: paddle.fluid.initializer.Uniform + +:alias_main: paddle.nn.initializer.Uniform +:alias: paddle.nn.initializer.Uniform +:old_api: paddle.fluid.initializer.Uniform + + + +``UniformInitializer`` 的别名 + + + diff --git a/doc/paddle/api/paddle/fluid/initializer/Xavier_cn.rst b/doc/paddle/api/paddle/nn/initializer/Xavier_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/initializer/Xavier_cn.rst rename to doc/paddle/api/paddle/nn/initializer/Xavier_cn.rst diff --git a/doc/paddle/api/paddle/nn/layer/conv/Conv2D_cn.rst b/doc/paddle/api/paddle/nn/layer/conv/Conv2D_cn.rst index b374e34f3..c09cb6548 100644 --- a/doc/paddle/api/paddle/nn/layer/conv/Conv2D_cn.rst +++ b/doc/paddle/api/paddle/nn/layer/conv/Conv2D_cn.rst @@ -1,120 +1,90 @@ +.. _cn_api_fluid_dygraph_Conv2D: + Conv2D ------------------------------- -.. py:class:: paddle.nn.Conv2d(num_channels, num_filters, filter_size, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW", dtype="float32") - -:alias_main: paddle.nn.Conv2D -:alias: paddle.nn.Conv2D,paddle.nn.layer.Conv2D,paddle.nn.layer.conv.Conv2D +.. 
py:class:: paddle.fluid.dygraph.Conv2D(num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, dtype='float32') -**二维卷积层** -该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 +该接口用于构建 ``Conv2D`` 类的一个可调用对象,具体用法参照 ``代码示例`` 。其将在神经网络中构建一个二维卷积层(Convolution2D Layer),其根据输入、滤波器参数(num_filters、filter_size)、步长(stride)、填充(padding)、膨胀系数(dilation)、组数(groups)参数来计算得到输出特征图。输入和输出是 ``NCHW`` 格式,N是批数据大小,C是特征图个数,H是特征图高度,W是特征图宽度。滤波器的维度是 [M, C, H, W] ,M是输出特征图个数,C是输入特征图个数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入特征图个数除以组数的结果。如果提供了偏移属性和激活函数类型,卷积的结果会和偏移相加,激活函数会作用在最终结果上。详情请参考: `卷积 `_ 。 -对每个输入X,有等式: +对每个输入 ``X`` ,有等式: .. math:: Out = \sigma \left ( W * X + b \right ) 其中: - - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor - - :math:`W` :滤波器值,MCHW格式的4-D Tensor + - :math:`X` :输入特征图, ``NCHW`` 格式的 ``Tensor`` + - :math:`W` :滤波器,维度为 [M, C, H, W] 的 ``Tensor`` - :math:`*` :卷积操作 - - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`b` :偏移值,2-D ``Tensor`` ,维度为 ``[M,1]`` - :math:`\sigma` :激活函数 - - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + - :math:`Out` :输出值, ``Out`` 和 ``X`` 的维度可能不同 -**示例** +**输出维度计算示例** - 输入: - 输入形状::math:`(N,C_{in},H_{in},W_{in})` + 输入维度: :math:`(N,C_{in},H_{in},W_{in})` - 滤波器形状: :math:`(C_{out},C_{in},H_{f},W_{f})` + 滤波器维度: :math:`(C_{out},C_{in},H_{f},W_{f})` - 输出: - 输出形状: :math:`(N,C_{out},H_{out},W_{out})` + 输出维度: :math:`(N,C_{out},H_{out},W_{out})` -其中 +- 其中 .. 
math:: - H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + H_{out} = \frac{\left ( H_{in}+2*paddings[0]-\left ( dilations[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{strides[0]}+1 - W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + W_{out} = \frac{\left ( W_{in}+2*paddings[1]-\left ( dilations[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{strides[1]}+1 -如果 ``padding`` = "SAME": +参数: + - **num_channels** (int) - 输入图像的通道数。 + - **num_filters** (int) - 滤波器的个数,和输出特征图个数相同。 + - **filter_size** (int|tuple) - 滤波器大小。如果 ``filter_size`` 是一个元组,则必须包含两个整型数,分别表示滤波器高度和宽度。否则,表示滤波器高度和宽度均为 ``filter_size`` 。 + - **stride** (int|tuple, 可选) - 步长大小。如果 ``stride`` 为元组,则必须包含两个整型数,分别表示垂直和水平滑动步长。否则,表示垂直和水平滑动步长均为 ``stride`` 。默认值:1。 + - **padding** (int|tuple, 可选) - 填充大小。如果 ``padding`` 为元组,则必须包含两个整型数,分别表示竖直和水平边界填充大小。否则,表示竖直和水平边界填充大小均为 ``padding`` 。默认值:0。 + - **dilation** (int|tuple, 可选) - 膨胀系数大小。如果 ``dilation`` 为元组,则必须包含两个整型数,分别表示垂直和水平膨胀系数。否则,表示垂直和水平膨胀系数均为 ``dilation`` 。默认值:1。 + - **groups** (int, 可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的分组卷积:当group=2,滤波器的前一半仅和输入特征图的前一半连接。滤波器的后一半仅和输入特征图的后一半连接。默认值:1。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool, 可选) - 是否用cudnn核,只有已安装cudnn库时才有效。默认值:True。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值:None。 + - **dtype** (str, 可选) - 数据类型,可以为"float32"或"float64"。默认值:"float32"。 + +返回:无 -.. math:: - H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值 -.. 
math:: - W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} +**代码示例** -如果 ``padding`` = "VALID": +.. code-block:: python -.. math:: - H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 - - W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 - -参数: - - **num_channels** (int) - 输入图像的通道数。 - - **num_filters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 - - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含两个整数值:(filter_size_height,filter_size_width)。若为一个整数,filter_size_height = filter_size_width = filter_size。 - - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 - - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 - - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 - - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 - - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 
:ref:`cn_api_fluid_ParamAttr` 。 - - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 - - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 - - **act** (str,可选) - 激活函数类型, 如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` 。如果设为None,则未添加激活函数。默认值:None。 - - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 - - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 - - **dtype** (str, 可选) – 权重的数据类型,可以为float32或float64。默认为float32。 + from paddle.fluid.dygraph.base import to_variable + import paddle.fluid as fluid + from paddle.fluid.dygraph import Conv2D + import numpy as np + data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') + with fluid.dygraph.guard(): + conv2d = Conv2D(3, 2, 3) + data = to_variable(data) + conv = conv2d(data) 属性 :::::::::::: .. py:attribute:: weight + 本层的可学习参数,类型为 ``Parameter`` .. py:attribute:: bias -本层的可学习偏置,类型为 ``Parameter`` - -返回: 无。 - -抛出异常: - - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 - - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 - - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 - - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 - - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 - - ``ShapeError`` - 如果输入不是4-D Tensor。 - - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 - - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 - - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 - - -**代码示例**: -.. 
code-block:: python +本层的可学习偏置,类型为 ``Parameter`` - import numpy as np - from paddle import fluid - import paddle.fluid.dygraph as dg - from paddle import nn - x = np.random.uniform(-1, 1, (2, 4, 8, 8)).astype('float32') - place = fluid.CPUPlace() - with dg.guard(place): - x_var = dg.to_variable(x) - conv = nn.Conv2D(4, 6, (3, 3)) - y_var = conv(x_var) - y_np = y_var.numpy() - print(y_np.shape) - - # (2, 6, 6, 6) diff --git a/doc/paddle/api/paddle/fluid/layers/switch_case_cn.rst b/doc/paddle/api/paddle/nn/switch_case_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/switch_case_cn.rst rename to doc/paddle/api/paddle/nn/switch_case_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/while_loop_cn.rst b/doc/paddle/api/paddle/nn/while_loop_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/while_loop_cn.rst rename to doc/paddle/api/paddle/nn/while_loop_cn.rst diff --git a/doc/paddle/api/paddle/framework/no_grad_cn.rst b/doc/paddle/api/paddle/no_grad_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/no_grad_cn.rst rename to doc/paddle/api/paddle/no_grad_cn.rst diff --git a/doc/paddle/api/paddle/framework/prepare_context_cn.rst b/doc/paddle/api/paddle/prepare_context_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/prepare_context_cn.rst rename to doc/paddle/api/paddle/prepare_context_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/rank_cn.rst b/doc/paddle/api/paddle/rank_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/rank_cn.rst rename to doc/paddle/api/paddle/rank_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reciprocal_cn.rst b/doc/paddle/api/paddle/reciprocal_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reciprocal_cn.rst rename to doc/paddle/api/paddle/reciprocal_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_all_cn.rst b/doc/paddle/api/paddle/reduce_all_cn.rst similarity index 
100% rename from doc/paddle/api/paddle/fluid/layers/reduce_all_cn.rst rename to doc/paddle/api/paddle/reduce_all_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_any_cn.rst b/doc/paddle/api/paddle/reduce_any_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_any_cn.rst rename to doc/paddle/api/paddle/reduce_any_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_max_cn.rst b/doc/paddle/api/paddle/reduce_max_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_max_cn.rst rename to doc/paddle/api/paddle/reduce_max_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_mean_cn.rst b/doc/paddle/api/paddle/reduce_mean_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_mean_cn.rst rename to doc/paddle/api/paddle/reduce_mean_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_min_cn.rst b/doc/paddle/api/paddle/reduce_min_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_min_cn.rst rename to doc/paddle/api/paddle/reduce_min_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_prod_cn.rst b/doc/paddle/api/paddle/reduce_prod_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_prod_cn.rst rename to doc/paddle/api/paddle/reduce_prod_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reduce_sum_cn.rst b/doc/paddle/api/paddle/reduce_sum_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reduce_sum_cn.rst rename to doc/paddle/api/paddle/reduce_sum_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/reshape_cn.rst b/doc/paddle/api/paddle/reshape_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/reshape_cn.rst rename to doc/paddle/api/paddle/reshape_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/round_cn.rst b/doc/paddle/api/paddle/round_cn.rst similarity index 100% rename from 
doc/paddle/api/paddle/fluid/layers/round_cn.rst rename to doc/paddle/api/paddle/round_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/rsqrt_cn.rst b/doc/paddle/api/paddle/rsqrt_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/rsqrt_cn.rst rename to doc/paddle/api/paddle/rsqrt_cn.rst diff --git a/doc/paddle/api/paddle/save_cn.rst b/doc/paddle/api/paddle/save_cn.rst new file mode 100644 index 000000000..a15b23f2b --- /dev/null +++ b/doc/paddle/api/paddle/save_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_save: + +save +------------------------------- + + +.. py:function:: paddle.fluid.save(program, model_path) + +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.save +:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save +:old_api: paddle.fluid.save + + + +该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。 + +参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。 + +优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。 +所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。 + +网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 + - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + x = fluid.data(name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + fluid.save(fluid.default_main_program(), "./test_path") + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/scale_cn.rst b/doc/paddle/api/paddle/scale_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/scale_cn.rst rename to doc/paddle/api/paddle/scale_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_cn.rst b/doc/paddle/api/paddle/scatter_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/scatter_cn.rst rename to doc/paddle/api/paddle/scatter_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_nd_add_cn.rst b/doc/paddle/api/paddle/scatter_nd_add_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/scatter_nd_add_cn.rst rename to doc/paddle/api/paddle/scatter_nd_add_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/scatter_nd_cn.rst b/doc/paddle/api/paddle/scatter_nd_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/scatter_nd_cn.rst rename to doc/paddle/api/paddle/scatter_nd_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/shape_cn.rst b/doc/paddle/api/paddle/shape_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/shape_cn.rst rename to doc/paddle/api/paddle/shape_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/shard_index_cn.rst b/doc/paddle/api/paddle/shard_index_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/shard_index_cn.rst rename to doc/paddle/api/paddle/shard_index_cn.rst diff --git a/doc/paddle/api/paddle/fluid/io/shuffle_cn.rst b/doc/paddle/api/paddle/shuffle_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/io/shuffle_cn.rst rename to 
doc/paddle/api/paddle/shuffle_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/sign_cn.rst b/doc/paddle/api/paddle/sign_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/sign_cn.rst rename to doc/paddle/api/paddle/sign_cn.rst diff --git a/doc/paddle/api/paddle/tensor/math/sin_cn.rst b/doc/paddle/api/paddle/sin_cn.rst similarity index 66% rename from doc/paddle/api/paddle/tensor/math/sin_cn.rst rename to doc/paddle/api/paddle/sin_cn.rst index 97d3fec0f..5dd8bcd22 100644 --- a/doc/paddle/api/paddle/tensor/math/sin_cn.rst +++ b/doc/paddle/api/paddle/sin_cn.rst @@ -1,25 +1,18 @@ -.. _cn_api_tensor_sin: +.. _cn_api_fluid_layers_sin: sin ------------------------------- -.. py:function:: paddle.sin(x, name=None, out=None) +.. py:function:: paddle.fluid.layers.sin(x, name=None) -:alias_main: paddle.sin -:alias: paddle.sin,paddle.tensor.sin,paddle.tensor.math.sin -:update_api: paddle.fluid.layers.sin 计算输入的正弦值。 -.. math:: - out = sin(x) - 参数: - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 @@ -28,11 +21,10 @@ sin .. code-block:: python import numpy as np - import paddle import paddle.fluid as fluid inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = paddle.sin(inputs) + output = fluid.layers.sin(inputs) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) @@ -42,3 +34,16 @@ sin res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) # [array([0. 
, 0.8509035 , 0.89399666], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/slice_cn.rst b/doc/paddle/api/paddle/slice_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/slice_cn.rst rename to doc/paddle/api/paddle/slice_cn.rst diff --git a/doc/paddle/api/paddle/tensor/math/sqrt_cn.rst b/doc/paddle/api/paddle/sqrt_cn.rst similarity index 63% rename from doc/paddle/api/paddle/tensor/math/sqrt_cn.rst rename to doc/paddle/api/paddle/sqrt_cn.rst index fd64d0cb3..779ab45fb 100644 --- a/doc/paddle/api/paddle/tensor/math/sqrt_cn.rst +++ b/doc/paddle/api/paddle/sqrt_cn.rst @@ -1,20 +1,16 @@ -.. _cn_api_tensor_sqrt: +.. _cn_api_fluid_layers_sqrt: sqrt ------------------------------- -.. py:function:: paddle.sqrt(x, name=None, out=None) +.. py:function:: paddle.fluid.layers.sqrt(x, name=None) -:alias_main: paddle.sqrt -:alias: paddle.sqrt,paddle.tensor.sqrt,paddle.tensor.math.sqrt -:update_api: paddle.fluid.layers.sqrt 计算输入的算数平方根。 -.. math:: - out=\sqrt x=x^{1/2} +.. math:: out=\sqrt x=x^{1/2} .. note:: 请确保输入中的数值是非负数。 @@ -23,7 +19,6 @@ sqrt - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 @@ -32,11 +27,10 @@ sqrt .. 
code-block:: python import numpy as np - import paddle import paddle.fluid as fluid inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = paddle.sqrt(inputs) + output = fluid.layers.sqrt(inputs) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) @@ -45,4 +39,17 @@ sqrt res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) print(res) - # [array([0., 3., 6.], dtype=float32)] + # [array([0., 3., 6.], dtype=float32)] + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/fluid/layers/square_cn.rst b/doc/paddle/api/paddle/square_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/square_cn.rst rename to doc/paddle/api/paddle/square_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/stanh_cn.rst b/doc/paddle/api/paddle/stanh_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/stanh_cn.rst rename to doc/paddle/api/paddle/stanh_cn.rst diff --git a/doc/paddle/api/paddle/static/global_scope_cn.rst b/doc/paddle/api/paddle/static/global_scope_cn.rst index 277bb23db..86031eedc 100644 --- a/doc/paddle/api/paddle/static/global_scope_cn.rst +++ b/doc/paddle/api/paddle/static/global_scope_cn.rst @@ -1,4 +1,4 @@ -.. _cn_api_fluid_executor_global_scope: +.. _cn_api_fluid_global_scope: global_scope ------------------------------- @@ -25,4 +25,4 @@ global_scope fluid.global_scope().var("data").get_tensor().set(numpy.ones((1, 2)), fluid.CPUPlace()) data = numpy.array(fluid.global_scope().find_var("data").get_tensor()) - print(data) # [[1. 1.]] + print(data) # [[1. 1.]] \ No newline at end of file diff --git a/doc/paddle/api/paddle/static/load_cn.rst b/doc/paddle/api/paddle/static/load_cn.rst new file mode 100644 index 000000000..a12a65fbe --- /dev/null +++ b/doc/paddle/api/paddle/static/load_cn.rst @@ -0,0 +1,55 @@ +.. _cn_api_fluid_load: + +load +------------------------------- + +.. 
py:function:: paddle.fluid.load(program, model_path, executor=None, var_list=None) + +:api_attr: 声明式编程模式(静态图) + + + +该接口从Program中过滤出参数和优化器信息,然后从文件中获取相应的值。 + +如果Program和加载的文件之间参数的维度或数据类型不匹配,将引发异常。 + +该函数还可以加载用[save_params,save_persistables,save_vars]接口保存的模型文件。 +当[save_params,save_persistables,save_vars]保存的模型格式为单个大文件时,var_list不能为None。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要加载的Program。 + - **model_path** (str) – 保存Program的目录名称+文件前缀。格式为 ``目录名称/文件前缀`` 。 + - **executor** (Executor, 可选) - 当startup program没有运行时,用于初始化参数的Executor。默认值:None。 + - **var_list** (list, 可选) - 指定加载的变量列表,该参数只在加载旧接口[save_params,save_persistables,save_vars]保存的模型文件时使用。当加载的是多个小文件时,变量列表可以是所有加载文件中变量的子集;当加载的单个大文件时,变量列表必须和加载文件中的变量保持一致。 + +返回: 无 + +**代码示例** + +.. code-block:: python + + # example1 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path") + + # example2 + # 注意example1和example2应该分开执行,避免干扰。 + import paddle.fluid as fluid + + x = fluid.data( name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + fluid.save(fluid.default_main_program(), "./test_path") + fluid.load(fluid.default_main_program(), "./test_path", exe) + diff --git a/doc/paddle/api/paddle/static/nn/batch_norm_cn.rst b/doc/paddle/api/paddle/static/nn/batch_norm_cn.rst new file mode 100644 index 000000000..95fa58257 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/batch_norm_cn.rst @@ -0,0 +1,111 @@ +.. _cn_api_fluid_layers_batch_norm: + +batch_norm +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.batch_norm(input, act=None, is_test=False, momentum=0.9, epsilon=1e-05, param_attr=None, bias_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, use_global_stats=False) + +:api_attr: 声明式编程模式(静态图) + + + +批正则化层(Batch Normalization Layer) + +可用作卷积和全连接操作的批正则化函数,根据当前批次数据按通道计算的均值和方差进行正则化。该层需要的数据格式如下: + +1.NHWC[batch,in_height,in_width,in_channels] +2.NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift `_ + +``input`` 是mini-batch的输入。 + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ + \ mini-batch\ mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \qquad &//\ + \ mini-batch\ variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift + + moving\_mean = moving\_mean * momentum + mini\_batch\_mean * (1. - momentum) \\ + moving\_variance = moving\_variance * momentum + mini\_batch\_var * (1. - momentum) + +moving_mean和moving_var是训练过程中统计得到的全局均值和方差,在预测或者评估中使用。 +`is_test` 参数只能用于测试或者评估阶段,如果想在训练阶段使用预训练模型的全局均值和方差的话,可以设置 `use_global_stats=True`. + +当use_global_stats = True时, :math:`\mu_{\beta}` 和 :math:`\sigma_{\beta}^{2}` 不是一个minibatch的统计数据。 它们是全局(或运行)统计数据(moving_mean和moving_variance),通常来自预先训练好的模型。训练和测试(或预测)具有相同的行为: + +.. 
math:: + + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\ + \sigma_{\beta}^{2} + \epsilon}} \\ + y_i &\gets \gamma \hat{x_i} + \beta + + + +参数: + - **input** (Variable) - batch_norm算子的输入特征,是一个Variable类型,输入维度可以是 2, 3, 4, 5。数据类型:float16, float32, float64。 + - **act** (string)- 激活函数类型,可以是leaky_relu、relu、prelu等。默认:None。 + - **is_test** (bool) - 指示它是否在测试阶段,非训练阶段使用训练过程中统计到的全局均值和全局方差。默认:False。 + - **momentum** (float|Variable)- 此值用于计算 moving_mean 和 moving_var,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。更新公式为: :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)` , :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)` , 默认:0.9。 + - **epsilon** (float)- 加在分母上为了数值稳定的值。默认:1e-5。 + - **param_attr** (ParamAttr|None) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的权重初始化是1.0。 + - **bias_attr** (ParamAttr|None)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。batch_norm算子默认的偏置初始化是0.0。 + - **data_layout** (string) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **in_place** (bool)- batch_norm的输出复用输入的tensor,可以节省显存。默认:False。 + - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **moving_mean_name** (string)- moving_mean的名称,存储全局均值。如果将其设置为None, ``batch_norm`` 将随机命名全局均值;否则, ``batch_norm`` 将命名全局均值为 ``moving_mean_name`` 。默认:None。 + - **moving_variance_name** (string)- moving_variance的名称,存储全局方差。如果将其设置为None, ``batch_norm`` 将随机命名全局方差;否则, ``batch_norm`` 将命名全局方差为 ``moving_variance_name`` 。默认:None。 + - **do_model_average_for_mean_and_var** (bool,默认False)- 是否为mean和variance做模型均值。 + - **use_global_stats** (bool) – 是否使用全局均值和方差。 在预测或测试模式下,将use_global_stats设置为true或将is_test设置为true,并且行为是等效的。 在训练模式中,当设置use_global_stats为True时,在训练期间也使用全局均值和方差。默认:False。 + +返回: 维度和输入相同的Tensor,在输入中运用批正则后的结果。 + +返回类型:Variable + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32', append_batch_size=False) + hidden1 = fluid.layers.fc(input=x, size=200) + param_attr = fluid.ParamAttr(name='batch_norm_w', initializer=fluid.initializer.Constant(value=1.0)) + bias_attr = fluid.ParamAttr(name='batch_norm_b', initializer=fluid.initializer.Constant(value=0.0)) + hidden2 = fluid.layers.batch_norm(input=hidden1, param_attr = param_attr, bias_attr = bias_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(3, 7, 3, 7)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden2]) + print(output) + +.. code-block:: python + + # batch_norm with momentum as Variable + import paddle.fluid as fluid + import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler + + def get_decay_momentum(momentum_init, decay_steps, decay_rate): + global_step = lr_scheduler._decay_step_counter() + momentum = fluid.layers.create_global_var( + shape=[1], + value=float(momentum_init), + dtype='float32', + # set persistable for save checkpoints and resume + persistable=True, + name="momentum") + div_res = global_step / decay_steps + decayed_momentum = momentum_init * (decay_rate**div_res) + fluid.layers.assign(decayed_momentum, momentum) + + return momentum + + x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32') + hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + momentum = get_decay_momentum(0.9, 1e5, 0.9) + hidden2 = fluid.layers.batch_norm(input=hidden1, momentum=momentum) + diff --git a/doc/paddle/api/paddle/static/nn/bilinear_tensor_product_cn.rst b/doc/paddle/api/paddle/static/nn/bilinear_tensor_product_cn.rst new file mode 100644 index 000000000..d65e2abdc --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/bilinear_tensor_product_cn.rst @@ -0,0 +1,51 @@ +.. 
_cn_api_fluid_layers_bilinear_tensor_product: +bilinear_tensor_product +------------------------------- + + +.. py:function:: paddle.fluid.layers.bilinear_tensor_product(x, y, size, act=None, name=None, param_attr=None, bias_attr=None) + +:api_attr: 声明式编程模式(静态图) + + + +该层对两个输入执行双线性张量积。 + +例如: + +.. math:: + out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1 + +在这个公式中: + - :math:`x`: 第一个输入,包含 :math:`M` 个元素,形状为 [batch_size, M]。 + - :math:`y`: 第二个输入,包含 :math:`N` 个元素,形状为 [batch_size, N]。 + - :math:`W_{i}`: 第 :math:`i` 个被学习的权重,形状是 [M, N]。 + - :math:`out_{i}`: 输出的第 :math:`i` 个元素,形状是 [batch_size, size]。 + - :math:`y^\mathrm{T}`: :math:`y` 的转置。 + +参数: + - **x** (Variable): 2-D 输入张量,形状为 [batch_size, M], 数据类型为 float32 或 float64。 + - **y** (Variable): 2-D 输入张量,形状为 [batch_size, N],数据类型与 **x** 一致。 + - **size** (int): 此层的维度。 + - **act** (str, 可选): 应用到该层输出的激活函数。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为 None。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为 None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为 None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + +返回: 一个形为 [batch_size, size] 的 2-D 张量。 + +返回类型:Variable + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + layer1 = fluid.layers.data("t1", shape=[-1, 5], dtype="float32") + layer2 = fluid.layers.data("t2", shape=[-1, 4], dtype="float32") + tensor = fluid.layers.bilinear_tensor_product(x=layer1, y=layer2, size=1000) + + + + diff --git a/doc/paddle/api/paddle/static/nn/conv2d_cn.rst b/doc/paddle/api/paddle/static/nn/conv2d_cn.rst new file mode 100644 index 000000000..4ae868dfe --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/conv2d_cn.rst @@ -0,0 +1,114 @@ +.. _cn_api_fluid_layers_conv2d: +conv2d +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.conv2d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCHW") + +:api_attr: 声明式编程模式(静态图) + + + +该OP是二维卷积层(convolution2D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算输出特征层大小。输入和输出是NCHW或NHWC格式,其中N是批尺寸,C是通道数,H是特征高度,W是特征宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数(groups)大于1,C等于输入图像通道数除以组数的结果。详情请参考UFLDL's : `卷积 `_ 。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCHW或NHWC格式的4-D Tensor + - :math:`W` :滤波器值,MCHW格式的4-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形状为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状::math:`(N,C_{in},H_{in},W_{in})` + + 滤波器形状: :math:`(C_{out},C_{in},H_{f},W_{f})` + +- 输出: + + 输出形状: :math:`(N,C_{out},H_{out},W_{out})` + +其中 + +.. math:: + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + H_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]} + +.. math:: + W_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]} + +如果 ``padding`` = "VALID": + +.. 
math:: + H_{out} = \frac{\left ( H_{in} -\left ( dilation[0]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[1]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 + - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含两个整数值:(filter_size_height,filter_size_width)。若为一个整数,filter_size_height = filter_size_width = filter_size。 + - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含两个整型数:(stride_height,stride_width)。若为一个整数,stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含两个整型数:(dilation_height,dilation_width)。若为一个整数,dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 二维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 
:ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型, 如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` 。如果设为None,则未添加激活函数。默认值:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回卷积计算的结果,如果指定激活层,则返回卷积和激活计算之后的最终结果。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv2d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + + + + diff --git a/doc/paddle/api/paddle/static/nn/conv2d_transpose_cn.rst b/doc/paddle/api/paddle/static/nn/conv2d_transpose_cn.rst new file mode 100644 index 000000000..3223f0718 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/conv2d_transpose_cn.rst @@ -0,0 +1,120 @@ +.. _cn_api_fluid_layers_conv2d_transpose: + +conv2d_transpose +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv2d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCHW') + +:api_attr: 声明式编程模式(静态图) + + + +二维转置卷积层(Convlution2D transpose layer) + +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCHW或NHWC格式,其中N为批尺寸,C为通道数(channel),H为特征层高度,W为特征层宽度。滤波器是MCHW格式,M是输出图像通道数,C是输入图像通道数,H是滤波器高度,W是滤波器宽度。如果组数大于1,C等于输入图像通道数除以组数的结果。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解转置卷积层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: https://arxiv.org/pdf/1603.07285.pdf + + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. 
math:: + Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCHW或NHWC格式的4-D Tensor + - :math:`W` : 滤波器,具有NCHW格式的4-D Tensor + - :math:`*` : 卷积计算(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCHW或NHWC格式的4-D Tensor, 和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入Tensor的形状: :math:`(N,C_{in}, H_{in}, W_{in})` + + 滤波器的形状 : :math:`(C_{in}, C_{out}, H_f, W_f)` + +- 输出: + + 输出Tensor的形状 : :math:`(N,C_{out}, H_{out}, W_{out})` + +其中 + +.. math:: + + & H'_{out} = (H_{in}-1)*strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1]- pad\_width\_left - pad\_width\_right + dilations[1]*(W_f-1)+1 \\ + & H_{out}\in[H'_{out},H'_{out} + strides[0])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[1])\\ + +如果 ``padding`` = "SAME": + +.. math:: + & H'_{out} = \frac{(H_{in} + stride[0] - 1)}{stride[0]}\\ + & W'_{out} = \frac{(W_{in} + stride[1] - 1)}{stride[1]}\\ + +如果 ``padding`` = "VALID": + +.. 
math:: + & H'_{out} = (H_{in}-1)*strides[0] + dilations[0]*(H_f-1)+1\\ + & W'_{out} = (W_{in}-1)*strides[1] + dilations[1]*(W_f-1)+1 \\ + +注意: + +如果output_size为None,则 :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[0]` 之间(不包含 :math:`H^\prime_{out} + strides[0]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[1]` 之间(不包含 :math:`W^\prime_{out} + strides[1]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv2d_transpose`` 可以自动计算滤波器的大小。 + +参数: + - **input** (Variable)- 形状为 :math:`[N, C, H, W]` 或 :math:`[N, H, W, C]` 的4-D Tensor,N是批尺寸,C是通道数,H是特征高度,W是特征宽度。数据类型:float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数,与输出图片的通道数相同。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含两个整型数,(output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含两个整型数,(filter_size_height, filter_size_width)。否则,filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv2d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含4个二元组:当 ``data_format`` 为"NCHW"时为 [[0,0], [0,0], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NHWC"时为[[0,0], [padding_height_top, padding_height_bottom], [padding_width_left, 
padding_width_right], [0,0]];(2)包含4个整数值:[padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含2个整数值:[padding_height, padding_width],此时padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_height = padding_width = padding。默认值:0。 + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,则必须包含两个整型数,形式为(stride_height,stride_width)。否则,stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例(dilation)大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组必须包含两个整型数,形式为(dilation_height, dilation_width)。否则,dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 二维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose算子默认的权重初始化是Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv2d_transpose算子默认的偏置初始化是0.0。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + +返回:4-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` : 如果输入的shape、filter_size、stride、padding和groups不匹配,抛出ValueError + - ``ValueError`` - 如果 ``data_format`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有4个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - 
``ShapeError`` - 如果输入不是4-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv2d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + diff --git a/doc/paddle/api/paddle/static/nn/conv3d_cn.rst b/doc/paddle/api/paddle/static/nn/conv3d_cn.rst new file mode 100644 index 000000000..7468274e7 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/conv3d_cn.rst @@ -0,0 +1,116 @@ +.. _cn_api_fluid_layers_conv3d: + +conv3d +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv3d(input, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format="NCDHW") + +:api_attr: 声明式编程模式(静态图) + + + +该OP是三维卷积层(convolution3D layer),根据输入、滤波器、步长(stride)、填充(padding)、膨胀比例(dilations)一组参数计算得到输出特征层大小。输入和输出是NCDHW或NDWHC格式,其中N是批尺寸,C是通道数,D是特征层深度,H是特征层高度,W是特征层宽度。三维卷积(Convlution3D)和二维卷积(Convlution2D)相似,但多了一维深度信息(depth)。如果bias_attr不为False,卷积计算会添加偏置项。如果指定了激活函数类型,相应的激活函数会作用在最终结果上。 + +对每个输入X,有等式: + +.. 
math:: + + Out = \sigma \left ( W * X + b \right ) + +其中: + - :math:`X` :输入值,NCDHW或NDHWC格式的5-D Tensor + - :math:`W` :滤波器值,MCDHW格式的5-D Tensor + - :math:`*` :卷积操作 + - :math:`b` :偏置值,2-D Tensor,形为 ``[M,1]`` + - :math:`\sigma` :激活函数 + - :math:`Out` :输出值, NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +- 输入: + + 输入形状: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + 滤波器形状: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + +- 输出: + + 输出形状: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + +其中 + +.. math:: + + D_{out} &= \frac{\left ( D_{in} + padding\_depth\_front + padding\_depth\_back-\left ( dilation[0]*\left ( D_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} &= \frac{\left ( H_{in} + padding\_height\_top + padding\_height\_bottom-\left ( dilation[1]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} &= \frac{\left ( W_{in} + padding\_width\_left + padding\_width\_right -\left ( dilation[2]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[2]}+1 + +如果 ``padding`` = "SAME": + +.. math:: + D_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]} + + H_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]} + + W_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]} + +如果 ``padding`` = "VALID": + +.. 
math:: + D_{out} = \frac{\left ( D_{in} -\left ( dilation[0]*\left ( D_{f}-1 \right )+1 \right ) \right )}{stride[0]}+1 + + H_{out} = \frac{\left ( H_{in} -\left ( dilation[1]*\left ( H_{f}-1 \right )+1 \right ) \right )}{stride[1]}+1 + + W_{out} = \frac{\left ( W_{in} -\left ( dilation[2]*\left ( W_{f}-1 \right )+1 \right ) \right )}{stride[2]}+1 + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型为float16, float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数。和输出图像通道相同。 + - **filter_size** (int|list|tuple) - 滤波器大小。如果它是一个列表或元组,则必须包含三个整数值:(filter_size_depth, filter_size_height,filter_size_width)。若为一个整数,则filter_size_depth = filter_size_height = filter_size_width = filter_size。 + - **stride** (int|list|tuple,可选) - 步长大小。滤波器和输入进行卷积计算时滑动的步长。如果它是一个列表或元组,则必须包含三个整型数:(stride_depth, stride_height, stride_width)。若为一个整数,stride_depth = stride_height = stride_width = stride。默认值:1。 + - **padding** (int|list|tuple|str,可选) - 填充大小。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 ``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [padding_depth_front, padding_depth_back], [padding_height_top, padding_height_bottom], [padding_width_left, padding_width_right], [0,0]];(2)包含6个整数值:[padding_depth_front, padding_depth_back, padding_height_top, padding_height_bottom, padding_width_left, padding_width_right];(3)包含3个整数值:[padding_depth, padding_height, padding_width],此时 padding_depth_front = padding_depth_back = padding_depth, padding_height_top = padding_height_bottom = padding_height, padding_width_left = padding_width_right = padding_width。若为一个整数,padding_depth = padding_height = padding_width = padding。默认值:0。 + - **dilation** (int|list|tuple,可选) - 
膨胀比例大小。空洞卷积时会使用该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息。如果膨胀比例为列表或元组,则必须包含三个整型数:(dilation_depth, dilation_height,dilation_width)。若为一个整数,dilation_depth = dilation_height = dilation_width = dilation。默认值:1。 + - **groups** (int,可选) - 三维卷积层的组数。根据Alex Krizhevsky的深度卷积神经网络(CNN)论文中的成组卷积:当group=n,输入和滤波器分别根据通道数量平均分为n组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算,……,第n组滤波器和第n组输入进行卷积计算。默认值:1。 + - **param_attr** (ParamAttr,可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选)- 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **use_cudnn** (bool,可选)- 是否使用cudnn内核。只有已安装cudnn库时才有效。默认值:True。 + - **act** (str,可选) - 激活函数类型, 如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` 。如果设为None,则未添加激活函数。默认值:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值:None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +返回:5-D Tensor,数据类型与 ``input`` 一致。如果未指定激活层,则返回卷积计算的结果,如果指定激活层,则返回卷积和激活计算之后的最终结果。 + +返回类型:Variable。 + +抛出异常: + - ``ValueError`` - 如果 ``use_cudnn`` 不是bool值。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``input`` 的通道数未被明确定义。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + - ``ShapeError`` - 如果输出的通道数不能被 ``groups`` 整除。 + + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv3d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 12, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) + + diff --git a/doc/paddle/api/paddle/static/nn/conv3d_transpose_cn.rst b/doc/paddle/api/paddle/static/nn/conv3d_transpose_cn.rst new file mode 100644 index 000000000..0331df5d6 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/conv3d_transpose_cn.rst @@ -0,0 +1,124 @@ +.. _cn_api_fluid_layers_conv3d_transpose: + +conv3d_transpose +------------------------------- + + +.. py:function:: paddle.fluid.layers.conv3d_transpose(input, num_filters, output_size=None, filter_size=None, padding=0, stride=1, dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, act=None, name=None, data_format='NCDHW') + +:api_attr: 声明式编程模式(静态图) + + + +三维转置卷积层(Convlution3D transpose layer) + +该层根据输入(input)、滤波器(filter)和卷积核膨胀比例(dilations)、步长(stride)、填充(padding)来计算输出特征层大小或者通过output_size指定输出特征层大小。输入(Input)和输出(Output)为NCDHW或者NDHWC格式。其中N为批尺寸,C为通道数(channel),D为特征深度,H为特征层高度,W为特征层宽度。转置卷积的计算过程相当于卷积的反向计算。转置卷积又被称为反卷积(但其实并不是真正的反卷积)。欲了解卷积转置层细节,请参考下面的说明和 参考文献_ 。如果参数bias_attr不为False, 转置卷积计算会添加偏置项。如果act不为None,则转置卷积计算之后添加相应的激活函数。 + +.. _参考文献: http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf + +输入 :math:`X` 和输出 :math:`Out` 函数关系如下: + +.. 
math:: + \\Out=\sigma (W*X+b)\\ + +其中: + - :math:`X` : 输入,具有NCDHW或NDHWC格式的5-D Tensor + - :math:`W` : 滤波器,具有NCDHW格式的5-D Tensor + - :math:`*` : 卷积操作(注意:转置卷积本质上的计算还是卷积) + - :math:`b` : 偏置(bias),2-D Tensor,形状为 ``[M,1]`` + - :math:`σ` : 激活函数 + - :math:`Out` : 输出值,NCDHW或NDHWC格式的5-D Tensor,和 ``X`` 的形状可能不同 + +**示例** + +输入: + + 输入的shape::math:`(N,C_{in}, D_{in}, H_{in}, W_{in})` + + 滤波器的shape::math:`(C_{in}, C_{out}, D_f, H_f, W_f)` + + + +输出: + + 输出的shape::math:`(N,C_{out}, D_{out}, H_{out}, W_{out})` + + +其中: + +.. math:: + + & D'_{out}=(D_{in}-1)*strides[0] - pad\_depth\_front - pad\_depth\_back + dilations[0]*(D_f-1)+1\\ + & H'_{out}=(H_{in}-1)*strides[1] - pad\_height\_top - pad\_height\_bottom + dilations[1]*(H_f-1)+1\\ + & W'_{out}=(W_{in}-1)*strides[2] - pad\_width\_left - pad\_width\_right + dilations[2]*(W_f-1)+1\\ + & D_{out}\in[D'_{out},D'_{out} + strides[0])\\ + & H_{out}\in[H'_{out},H'_{out} + strides[1])\\ + & W_{out}\in[W'_{out},W'_{out} + strides[2])\\ + +如果 ``padding`` = "SAME": + +.. math:: + D'_{out} = \frac{(D_{in} + stride[0] - 1)}{stride[0]}\\ + H'_{out} = \frac{(H_{in} + stride[1] - 1)}{stride[1]}\\ + W'_{out} = \frac{(W_{in} + stride[2] - 1)}{stride[2]}\\ + +如果 ``padding`` = "VALID": + +.. 
math:: + D'_{out}=(D_{in}-1)*strides[0] + dilations[0]*(D_f-1)+1\\ + H'_{out}=(H_{in}-1)*strides[1] + dilations[1]*(H_f-1)+1\\ + W'_{out}=(W_{in}-1)*strides[2] + dilations[2]*(W_f-1)+1\\ + +注意: + +如果output_size为None,则 :math:`D_{out}` = :math:`D^\prime_{out}` , :math:`H_{out}` = :math:`H^\prime_{out}` , :math:`W_{out}` = :math:`W^\prime_{out}` ;否则,指定的output_size_depth(输出特征层的深度) :math:`D_{out}` 应当介于 :math:`D^\prime_{out}` 和 :math:`D^\prime_{out} + strides[0]` 之间(不包含 :math:`D^\prime_{out} + strides[0]` ),指定的output_size_height(输出特征层的高) :math:`H_{out}` 应当介于 :math:`H^\prime_{out}` 和 :math:`H^\prime_{out} + strides[1]` 之间(不包含 :math:`H^\prime_{out} + strides[1]` ), 并且指定的output_size_width(输出特征层的宽) :math:`W_{out}` 应当介于 :math:`W^\prime_{out}` 和 :math:`W^\prime_{out} + strides[2]` 之间(不包含 :math:`W^\prime_{out} + strides[2]` )。 + +由于转置卷积可以当成是卷积的反向计算,而根据卷积的输入输出计算公式来说,不同大小的输入特征层可能对应着相同大小的输出特征层,所以对应到转置卷积来说,固定大小的输入特征层对应的输出特征层大小并不唯一。 + +如果指定了output_size, ``conv3d_transpose`` 可以自动计算滤波器的大小。 + +参数: + - **input** (Variable)- 形状为 :math:`[N, C, D, H, W]` 或 :math:`[N, D, H, W, C]` 的5-D Tensor,N是批尺寸,C是通道数,D是特征深度,H是特征高度,W是特征宽度,数据类型:float32或float64。 + - **num_filters** (int) - 滤波器(卷积核)的个数,与输出的图片的通道数相同。 + - **output_size** (int|tuple,可选) - 输出图片的大小。如果output_size是一个元组,则必须包含三个整型数,(output_size_depth,output_size_height,output_size_width)。如果output_size=None,则内部会使用filter_size、padding和stride来计算output_size。如果output_size和filter_size是同时指定的,那么它们应满足上面的公式。默认:None。output_size和filter_size不能同时为None。 + - **filter_size** (int|tuple,可选) - 滤波器大小。如果filter_size是一个元组,则必须包含三个整型数,(filter_size_depth,filter_size_height, filter_size_width)。否则,filter_size_depth = filter_size_height = filter_size_width = filter_size。如果filter_size=None,则必须指定output_size, ``conv3d_transpose`` 内部会根据output_size、padding和stride计算出滤波器大小。默认:None。output_size和filter_size不能同时为None。 + - **padding** (int|list|tuple|str,可选) - 填充padding大小。padding参数在输入特征层每边添加 ``dilation * (kernel_size - 1) - padding`` 个0。如果它是一个字符串,可以是"VALID"或者"SAME",表示填充算法,计算细节可参考上述 
``padding`` = "SAME"或 ``padding`` = "VALID" 时的计算公式。如果它是一个元组或列表,它可以有3种格式:(1)包含5个二元组:当 ``data_format`` 为"NCDHW"时为 [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]],当 ``data_format`` 为"NDHWC"时为[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]];(2)包含6个整数值:[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right];(3)包含3个整数值:[pad_depth, pad_height, pad_width],此时 pad_depth_front = pad_depth_back = pad_depth, pad_height_top = pad_height_bottom = pad_height, pad_width_left = pad_width_right = pad_width。若为一个整数,pad_depth = pad_height = pad_width = padding。默认值:0。 + - **stride** (int|tuple,可选) - 步长stride大小。滤波器和输入进行卷积计算时滑动的步长。如果stride是一个元组,那么元组的形式为(stride_depth,stride_height,stride_width)。否则,stride_depth = stride_height = stride_width = stride。默认:stride = 1。 + - **dilation** (int|tuple,可选) - 膨胀比例dilation大小。空洞卷积时会指该参数,滤波器对输入进行卷积时,感受野里每相邻两个特征点之间的空洞信息,根据 `可视化效果图 `_ 较好理解。如果膨胀比例dilation是一个元组,那么元组的形式为(dilation_depth,dilation_height, dilation_width)。否则,dilation_depth = dilation_height = dilation_width = dilation。默认:dilation= 1。 + - **groups** (int,可选) - 三维转置卷积层的组数。从Alex Krizhevsky的CNN Deep论文中的群卷积中受到启发,当group=2时,输入和滤波器分别根据通道数量平均分为两组,第一组滤波器和第一组输入进行卷积计算,第二组滤波器和第二组输入进行卷积计算。默认:group = 1。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的权重初始化是Xavier。 + - **bias_attr** (ParamAttr|False,可选)- 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。conv3d_transpose算子默认的偏置初始化是0.0。 + - **use_cudnn** (bool,可选) - 是否使用cudnn内核,只有已安装cudnn库时才有效。默认:True。 + - **act** (str,可选) - 激活函数类型,如果设置为None,则不使用激活函数。默认:None。 + - **name** (str,可选) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 + - **data_format** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCDHW"和"NDHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCDHW"。 + +返回:5-D Tensor,数据类型与 
``input`` 一致。如果未指定激活层,则返回转置卷积计算的结果,如果指定激活层,则返回转置卷积和激活计算之后的最终结果。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 如果输入的shape、filter_size、stride、padding和groups不匹配。 + - ``ValueError`` - 如果 ``data_format`` 既不是"NCDHW"也不是"NDHWC"。 + - ``ValueError`` - 如果 ``padding`` 是字符串,既不是"SAME"也不是"VALID"。 + - ``ValueError`` - 如果 ``padding`` 含有5个二元组,与批尺寸对应维度的值不为0或者与通道对应维度的值不为0。 + - ``ValueError`` - 如果 ``output_size`` 和 ``filter_size`` 同时为None。 + - ``ShapeError`` - 如果输入不是5-D Tensor。 + - ``ShapeError`` - 如果输入和滤波器的维度大小不相同。 + - ``ShapeError`` - 如果输入的维度大小与 ``stride`` 之差不是2。 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + param_attr = fluid.ParamAttr(name='conv3d.weight', initializer=fluid.initializer.Xavier(uniform=False), learning_rate=0.001) + res = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(1, 3, 12, 32, 32).astype("float32") + output = exe.run(feed={"data": x}, fetch_list=[res]) + print(output) diff --git a/doc/paddle/api/paddle/static/nn/create_parameter_cn.rst b/doc/paddle/api/paddle/static/nn/create_parameter_cn.rst new file mode 100644 index 000000000..4176d7507 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/create_parameter_cn.rst @@ -0,0 +1,44 @@ +.. _cn_api_fluid_layers_create_parameter: + +create_parameter +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.create_parameter(shape,dtype,name=None,attr=None,is_bias=False,default_initializer=None) + +:api_attr: 声明式编程模式(静态图) + + + +该OP创建一个参数。该参数是一个可学习的变量, 拥有梯度并且可优化。 + +**注意:这是一个低级别的API。如果您希望自己创建新的op,这个API将非常有用,无需使用layers。** + +参数: + - **shape** (list[int]) - 指定输出Tensor的形状,它可以是一个整数列表。 + - **dtype** (str|numpy.dtype) – 初始化数据类型。可设置的字符串值有:"float16","float32","float64"。 + - **name** (str,可选) - 参数的名称。具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **attr** (ParamAttr,可选) - 指定参数的属性对象。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。默认值为None,表示将采用 ParamAttr 的默认方式初始化。 + - **is_bias** (bool,可选) - 当default_initializer为空,该值会对选择哪个默认初始化程序产生影响。如果is_bias为真,则使用initializer.Constant(0.0),否则使用Xavier(),默认值False。 + - **default_initializer** (Initializer,可选) - 参数的初始化程序,默认值为空。 + +返回:创建的Tensor变量。 + +返回类型:Variable。 + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + import paddle.fluid.layers as layers + W = layers.create_parameter(shape=[784, 200], dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/crf_decoding_cn.rst b/doc/paddle/api/paddle/static/nn/crf_decoding_cn.rst new file mode 100644 index 000000000..ea5fd7eb1 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/crf_decoding_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_layers_crf_decoding: + +crf_decoding +------------------------------- + + +.. py:function:: paddle.fluid.layers.crf_decoding(input, param_attr, label=None, length=None) + +:api_attr: 声明式编程模式(静态图) + + + + + +该层读取由 :ref:`cn_api_fluid_layers_linear_chain_crf` 学习的 emission feature weights(发射状态特征的权重)和 transition feature weights (转移特征的权重) 进行解码。 +本层实现了 Viterbi 算法,可以动态地寻找隐藏状态最可能的序列,该序列也被称为 Viterbi 路径(Viterbi path),从而得到观察标签 (tags) 序列。 + +这个层运算的结果会随着输入 ``Label`` 的有无而改变: + + 1. 
``Label`` 非 None 的情况,在实际训练中时常发生。此时本层会协同 :ref:`cn_api_fluid_layers_chunk_eval` 工作。在 LoDTensor 模式下,本层会返回一行形为 [N X 1] 的向量,在 padding 模式下,返回形状则为 [B x S],其中值为 0 的部分代表该 label 不适合作为对应结点的标注,值为1的部分则反之。此类型的输出可以直接作为 :ref:`cn_api_fluid_layers_chunk_eval` 算子的输入; + + 2. 当没有 ``Label`` 时,该函数会执行标准解码过程; + +(没有 ``Label`` 时)该运算返回一个形状为 [N X 1] 或 [B x S] 的向量,此处的形状取决于输入是 LoDTensor 还是普通 Tensor,其中元素取值范围为 0 ~ 最大标注个数-1,分别为预测出的标注(tag)所在的索引。 + +参数: + - **input** (Variable) — 一个形为 [N x D] 的 LoDTensor,其中 N 是mini-batch的大小,D是标注(tag) 的总数; 或者形为 [B x S x D] 的普通 Tensor,B 是批次大小,S 是序列最大长度,D 是标注的总数。 该输入是 :ref:`cn_api_fluid_layers_linear_chain_crf`` 的 unscaled emission weight matrix (未标准化的发射权重矩阵)。数据类型为 float32 或者 float64。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **label** (Variable,可选) — 形为 [N x 1] 的正确标注(ground truth)(LoDTensor 模式),或者形状为 [B x S]。 有关该参数的更多信息,请详见上述描述。数据类型为 int64。 + - **length** (Variable,可选) — 形状为 [B x 1], 表示输入序列的真实长度。该输入非 None,表示该层工作在 padding 模式下,即 ``input`` 和 ``label`` 都是带 padding 的普通 Tensor。数据类型为 int64。 + +返回:解码结果具体内容根据 ``Label`` 参数是否提供而定,请参照上面的介绍来详细了解。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + images = fluid.layers.data(name='pixel', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int32') + hidden = fluid.layers.fc(input=images, size=2) + crf = fluid.layers.linear_chain_crf(input=hidden, label=label, + param_attr=fluid.ParamAttr(name="crfw")) + crf_decode = fluid.layers.crf_decoding(input=hidden, + param_attr=fluid.ParamAttr(name="crfw")) + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/data_norm_cn.rst b/doc/paddle/api/paddle/static/nn/data_norm_cn.rst new file mode 100644 index 000000000..4954dfa22 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/data_norm_cn.rst @@ -0,0 +1,65 @@ +.. _cn_api_fluid_layers_data_norm: + +data_norm +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.data_norm(input, act=None, epsilon=1e-05, param_attr=None, data_layout='NCHW', in_place=False, name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False) + +:api_attr: 声明式编程模式(静态图) + + + +**数据正则化层** + +可用作conv2d和fully_connected操作的正则化函数。 此层所需的数据格式为以下之一: + +1. NHWC [batch, in_height, in_width, in_channels] +2. NCHW [batch, in_channels, in_height, in_width] + +:math:`input` 为一个mini-batch上的特征: + +.. math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ + \ mini-batch\ mean \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \ + \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\ + \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift + +参数: + - **input** (Variable) - 输入变量,它是一个LoDTensor。 + - **act** (string,默认None) - 激活函数类型,线性| relu | prelu | ... + - **epsilon** (float,默认1e-05) - + - **param_attr** (ParamAttr) - 参数比例的参数属性。 + - **data_layout** (str,可选) - 指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **in_place** (bool,默认值False) - 使data_norm的输入和输出复用同一块内存。 + - **name** (string,默认None) - 此层的名称(可选)。 如果设置为None,则将自动命名该层。 + - **moving_mean_name** (string,Default None) - 存储全局Mean的moving_mean的名称。 + - **moving_variance_name** (string,默认None) - 存储全局Variance的moving_variance的名称。 + - **do_model_average_for_mean_and_var** (bool,默认值为false) - 是否为mean和variance进行模型平均。 + - **slot_dim** (int, 默认值为-1) - 一个slot的embedding维度,slot用来表征一类特征的集合,在pslib模式下,通常我们通过slot区分特征id,并从参数服务器(pslib)中提取它们的embedding。embedding的第一维是历史上这个embedding展示的次数。如果本op的输入是由这样的embedding连接而来,那么当这个特征id是新的或空的,则正则化结果可能不实际。为了避免这种情况,我们添加了slot_dim来定位并判断这一维是否为零。如果是的话,我们选择跳过正则化。 + - **summary_decay_rate** (float, 默认值为0.9999999) - 更新summary信息时的衰减率。 + - **sync_stats** (bool, 默认值False) - 在多GPU卡的场景下可以使用,用来同步多卡间的summary信息。 + - **enable_scale_and_shift** (bool, 默认值False) - 
在分布式全局正则化后是否做像batchnorm一样做scale&shift的操作。 + +返回: 张量变量,是对输入数据进行正则化后的结果。 + +返回类型: Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + hidden1 = fluid.layers.data(name="hidden1", shape=[200]) + hidden2 = fluid.layers.data_norm(name="hidden2", input=hidden1) + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/deformable_conv_cn.rst b/doc/paddle/api/paddle/static/nn/deformable_conv_cn.rst new file mode 100644 index 000000000..a74315d5a --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/deformable_conv_cn.rst @@ -0,0 +1,102 @@ +.. _cn_api_fluid_layers_deformable_conv: + +deformable_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None)

:api_attr: 声明式编程模式(静态图) + + + +**可变形卷积算子** + +deformable_conv op对输入4-D Tensor计算2-D可变形卷积。给定输入Tensor x,输出Tensor y,可变形卷积运算如下所示: + +可形变卷积v2: + + :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}` + +可形变卷积v1: + + :math:`y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}` + +其中 :math:`\Delta p_k` 和 :math:`\Delta m_k` 分别为第k个位置的可学习偏移和调制标量。在deformable_conv_v1中 :math:`\Delta m_k` 为1. + +具体细节可以参考论文:`Deformable ConvNets v2 <https://arxiv.org/abs/1811.11168v2>`_ 和 `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_ 。 + +**示例** + +输入: + input 形状: :math:`(N, C_{in}, H_{in}, W_{in})` + + 卷积核形状: :math:`(C_{out}, C_{in}, H_f, W_f)` + + offset 形状: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})` + + mask 形状: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})` + +输出: + 输出形状: :math:`(N, C_{out}, H_{out}, W_{out})` + +其中 + +.. 
math:: + + H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 + + W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 + + +参数: + - **input** (Variable) - 形状为 :math:`[N, C, H, W]` 的输入Tensor,数据类型为float32或float64。 + - **offset** (Variable) – 可变形卷积层的输入坐标偏移,数据类型为float32或float64。 + - **mask** (Variable, 可选) – 可变形卷积层的输入掩码,当使用可变形卷积算子v1时,请将mask设置为None, 数据类型为float32或float64。 + - **num_filters** (int) – 卷积核数,与输出Tensor通道数相同。 + - **filter_size** (int|tuple) – 卷积核大小。如果filter_size为元组,则必须包含两个整数(filter_size_H, filter_size_W)。若数据类型为int,卷积核形状为(filter_size, filter_size)。 + - **stride** (int|tuple) – 步长大小。如果stride为元组,则必须包含两个整数(stride_H, stride_W)。否则stride_H = stride_W = stride。默认值为1。 + - **padding** (int|tuple) – padding大小。如果padding为元组,则必须包含两个整数(padding_H, padding_W)。否则padding_H = padding_W = padding。默认值为0。 + - **dilation** (int|tuple) – dilation大小。如果dilation为元组,则必须包含两个整数(dilation_H, dilation_W)。否则dilation_H = dilation_W = dilation。默认值为1。 + - **groups** (int) – 卷积组数。依据Alex Krizhevsky的Deep CNN论文中的分组卷积,有:当group=2时,前一半卷积核只和前一半输入通道有关,而后一半卷积核只和后一半输入通道有关。缺省值为1。 + - **deformable_groups** (int) – 可变形卷积组数。默认值为1。 + - **im2col_step** (int) – 每个im2col计算的最大图像数。总batch大小应可以被该值整除或小于该值。如果您面临内存问题,可以尝试在此处使用一个较小的值。默认值为64。 + - **param_attr** (ParamAttr,可选) – 可变形卷积的可学习权重的属性。如果将其设置为None或某种ParamAttr,可变形卷积将创建ParamAttr作为param_attr。如果没有设置此param_attr的Initializer,该参数将被Normal(0.0, std)初始化,且其中的std为 :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}` 。默认值为None。 + - **bias_attr** (ParamAttr|bool,可选) – 可变形卷积层的偏置的参数属性。如果设为False,则输出单元不会加偏置。如果设为None或者某种ParamAttr,conv2d会创建ParamAttr作为bias_attr。如果不设置bias_attr的Initializer,偏置会被初始化为0。默认值为None。 + - **modulated** (bool)- 确定使用v1和v2中的哪个版本,如果为True,则选择使用v2。默认值为True。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:可变形卷积输出的4-D Tensor,数据类型为float32或float64。 + +返回类型:Variable + +抛出异常:ValueError – 如果input, filter_size, stride, padding和groups的大小不匹配。 + +**代码示例** + +.. 
code-block:: python + + #deformable conv v2: + + import paddle.fluid as fluid + C_in, H_in, W_in = 3, 32, 32 + filter_size, deformable_groups = 3, 1 + data = fluid.layers.data(name='data', shape=[C_in, H_in, W_in], dtype='float32') + offset = fluid.layers.data(name='offset', shape=[2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + mask = fluid.layers.data(name='mask', shape=[deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask, + num_filters=2, filter_size=filter_size, padding=1, modulated=True) + + #deformable conv v1: + + import paddle.fluid as fluid + C_in, H_in, W_in = 3, 32, 32 + filter_size, deformable_groups = 3, 1 + data = fluid.layers.data(name='data', shape=[C_in, H_in, W_in], dtype='float32') + offset = fluid.layers.data(name='offset', shape=[2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') + out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None, + num_filters=2, filter_size=filter_size, padding=1, modulated=False) + + + + diff --git a/doc/paddle/api/paddle/static/nn/embedding_cn.rst b/doc/paddle/api/paddle/static/nn/embedding_cn.rst new file mode 100644 index 000000000..ee70bead5 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/embedding_cn.rst @@ -0,0 +1,97 @@ +.. _cn_api_fluid_embedding: + +embedding +------------------------------- + + +.. 
py:function:: paddle.fluid.embedding(input, size, is_sparse=False, is_distributed=False, padding_idx=None, param_attr=None, dtype='float32') + +:api_attr: 声明式编程模式(静态图) + + + +该OP根据input中的id信息从embedding矩阵中查询对应embedding信息,函数会根据输入的size (vocab_size, emb_size)和dtype自动构造一个二维embedding矩阵。 + +输出的Tensor的shape是在输入Tensor shape的最后一维后面添加了emb_size的维度。 + +注:input中的id必须满足 ``0 =< id < size[0]``,否则程序会抛异常退出。 + + +:: + + Case 1: + + input是Tensor, 且padding_idx = -1 + input.data = [[1, 3], [2, 4], [4, 127]] + input.shape = [3, 2] + 若size = [128, 16] + 输出为Tensor: + out.shape = [3, 2, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452], + [0.345421456, 0.524563927, ..., 0.144534654]], + + [[0.345249859, 0.124939536, ..., 0.194353745], + [0.945345345, 0.435394634, ..., 0.435345365]], + + [[0.945345345, 0.435394634, ..., 0.435345365], + [0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx小于0,则自动转换为padding_idx = -1 + 128 = 127, 对于输入id为127的词,进行padding处理。 + + Case 2: + + input是lod level 为1的LoDTensor, 且padding_idx = 0 + input.lod = [[2, 3]] + input.data = [[1], [3], [2], [4], [0]] + input.shape = [5, 1] + 若size = [128, 16] + 输出为LoDTensor: + out.lod = [[2, 3]] + out.shape = [5, 1, 16] + out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]], + [[0.345421456, 0.524563927, ..., 0.144534654]], + [[0.345249859, 0.124939536, ..., 0.194353745]], + [[0.945345345, 0.435394634, ..., 0.435345365]], + [[0.0, 0.0, ..., 0.0 ]]] # padding data + 输入的padding_idx = 0,则对于输入id为0的词,进行padding处理。 + + +参数: + - **input** (Variable) - 存储id信息的Tensor或LoDTensor,数据类型必须为:int64。input中的id必须满足 ``0 =< id < size[0]`` 。 + - **size** (tuple|list) - embedding矩阵的维度。必须包含两个元素,第一个元素为vocab_size(词表大小), 第二个为emb_size(embedding层维度)。 + - **is_sparse** (bool) - 是否使用稀疏的更新方式,这个参数只会影响反向的梯度更新的性能,sparse更新速度更快,推荐使用稀疏更新的方式。但某些optimizer不支持sparse更新,比如 :ref:`cn_api_fluid_optimizer_AdadeltaOptimizer` 、 :ref:`cn_api_fluid_optimizer_AdamaxOptimizer` 、 :ref:`cn_api_fluid_optimizer_DecayedAdagradOptimizer` 、 
:ref:`cn_api_fluid_optimizer_FtrlOptimizer` 、 :ref:`cn_api_fluid_optimizer_LambOptimizer` 、:ref:`cn_api_fluid_optimizer_LarsMomentumOptimizer` ,此时is_sparse必须为False。默认为False。 + - **is_distributed** (bool) - 是否使用分布式的方式存储embedding矩阵,仅在多机分布式cpu训练中使用。默认为False。 + - **padding_idx** (int|long|None) - padding_idx需在区间[-vocab_size, vocab_size),否则不生效,padding_idx<0时,padding_idx 会被改成 vocab_size + padding_idx,input中等于padding_index的id对应的embedding信息会被设置为0,且这部分填充数据在训练时将不会被更新。如果为none,不作处理,默认为None。 + - **param_attr** (ParamAttr) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。此外,可以通过 ``param_attr`` 参数加载用户自定义或预训练的词向量。只需将本地词向量转为numpy数据格式,且保证本地词向量的shape和embedding的 ``size`` 参数一致,然后使用 :ref:`cn_api_fluid_initializer_NumpyArrayInitializer` 进行初始化,即可实现加载自定义或预训练的词向量。详细使用方法见代码示例2。 + - **dtype** (str|core.VarDesc.VarType) - 输出Tensor或LoDTensor的数据类型,数据类型必须为:float32,float64,默认为float32。 + +返回:input映射后embedding Tensor或LoDTensor,数据类型和dtype定义的类型一致。 + +返回类型:Variable + +**代码示例**: + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1) + + # 示例 1 + emb_1 = fluid.embedding(input=data, size=[128, 64]) + + # 示例 2: 加载用户自定义或预训练的词向量 + weight_data = np.random.random(size=(128, 100)) # numpy格式的词向量数据 + w_param_attrs = fluid.ParamAttr( + name="emb_weight", + learning_rate=0.5, + initializer=fluid.initializer.NumpyArrayInitializer(weight_data), + trainable=True) + emb_2 = fluid.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32') + + + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/fc_cn.rst b/doc/paddle/api/paddle/static/nn/fc_cn.rst new file mode 100644 index 000000000..6613b2d88 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/fc_cn.rst @@ -0,0 +1,114 @@ +.. _cn_api_fluid_layers_fc: + +fc +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.fc(input, size, num_flatten_dims=1, param_attr=None, bias_attr=None, act=None, name=None) + +:api_attr: 声明式编程模式(静态图) + + + + +**全连接层** + +该OP将在神经网络中构建一个全连接层。其输入可以是一个Tensor(或LoDTensor)或多个Tensor(或LoDTensor)组成的list(详见参数说明),该OP会为每个输入的Tensor创建一个权重(weights)变量,即一个从每个输入单元到每个输出单元的全连接权重矩阵。FC层将每个输入Tensor和其对应的权重(weights)相乘得到shape为 :math:`[M, size]` 输出Tensor,其中 ``M`` 为batch_size大小。如果有多个输入Tensor,则多个shape为 :math:`[M, size]` 的Tensor计算结果会被累加起来,作为最终输出。如果 ``bias_attr`` 非空,则会创建一个偏置变量(bias variable),并把它累加到输出结果中。如果 ``act`` 非空,将会在输出结果上应用相应的激活函数。 + +当输入为单个Tensor(或LoDTensor): + +.. math:: + + \\Out = Act({XW + b})\\ + + + +当输入为多个Tensor(或LoDTensor)组成的list时: + +.. math:: + + \\Out=Act(\sum^{N-1}_{i=0}X_iW_i+b) \\ + + +上述等式中: + - :math:`N` :输入的数目,如果输入是Tensor列表,N等于len(input) + - :math:`X_i` :第i个输入的Tensor + - :math:`W_i` :对应第i个输入张量的第i个权重矩阵 + - :math:`b` :该层创建的bias参数 + - :math:`Act` :activation function(激活函数) + - :math:`Out` :输出Tensor + +:: + + Case 1: + 给定单个输入Tensor data_1, 且num_flatten_dims = 2: + data_1.data = [[[0.1, 0.2], + [0.3, 0.4]]] + data_1.shape = (1, 2, 2) # 1是batch_size + + out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2) + + 则输出为: + out.data = [[0.83234344], [0.34936576]] + out.shape = (1, 2, 1) + + + Case 2: + 给定多个Tensor组成的list: + data_1.data = [[[0.1, 0.2], + [0.3, 0.4]]] + data_1.shape = (1, 2, 2) # 1 是 batch_size + + data_2 = [[[0.1, 0.2, 0.3]]] + data_2.shape = (1, 1, 3) + + out = fluid.layers.fc(input=[data_1, data_2], size=2) + + 则输出为: + out.data = [[0.18669507, 0.1893476]] + out.shape = (1, 2) + + +参数: + - **input** (Variable|list of Variable) – 维度为 :math:`[N_1, N_2, ..., N_k]` 的多维Tensor(或LoDTensor)或由多个Tensor(或LoDTensor)组成的list,输入Tensor的shape至少是2。数据类型为float32或float64。 + - **size** (int) – 全连接层输出单元的数目,即输出Tensor(或LoDTensor)特征维度。 + - **num_flatten_dims** (int) – 输入可以接受维度大于2的Tensor。在计算时,输入首先会被扁平化(flatten)为一个二维矩阵,之后再与权重(weights)相乘。参数 ``num_flatten_dims`` 决定了输入Tensor的flatten方式: 前 ``num_flatten_dims`` (包含边界,从1开始数) 个维度会被扁平化为二维矩阵的第一维 
(即为矩阵的高), 剩下的 :math:`rank(X) - num\_flatten\_dims` 维被扁平化为二维矩阵的第二维 (即矩阵的宽)。 例如, 假设X是一个五维的Tensor,其shape为(2, 3, 4, 5, 6), 若 :math:`num\_flatten\_dims = 3` ,则扁平化的矩阵shape为: :math:`(2 x 3 x 4, 5 x 6) = (24, 30)` ,最终输出Tensor的shape为 :math:`(2, 3, 4, size)` 。默认为1。 + - **param_attr** (ParamAttr) – 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr) – 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str) – 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回:经过全连接层计算后的Tensor或LoDTensor,数据类型与input类型一致。 + +返回类型: Variable + +弹出异常:``ValueError`` - 如果输入Tensor(或LoDTensor)的维度小于2 + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + # 当输入为单个张量时 + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + + # 当输入为多个张量时 + data_1 = fluid.layers.data(name="data_1", shape=[32, 32], dtype="float32") + data_2 = fluid.layers.data(name="data_2", shape=[24, 36], dtype="float32") + fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh") + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/group_norm_cn.rst b/doc/paddle/api/paddle/static/nn/group_norm_cn.rst new file mode 100755 index 000000000..049a9e2f0 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/group_norm_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_group_norm: + +group_norm +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.group_norm(input, groups, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, data_layout='NCHW', name=None) + +:api_attr: 声明式编程模式(静态图) + + + +参考论文: `Group Normalization `_ + +参数: + - **input** (Variable):输入为4-D Tensor,数据类型为float32或float64。 + - **groups** (int):从 channel 中分离出来的 group 的数目,数据类型为int32。 + - **epsilon** (float,可选):为防止方差除以零,增加一个很小的值。数据类型为float32。默认值:1e-05。 + - **param_attr** (ParamAttr|bool,可选) :指定权重参数属性的对象。若 ``param_attr`` 为bool类型,只支持为False,表示没有权重参数。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr|bool,可选) : 指定偏置参数属性的对象。若 ``bias_attr`` 为bool类型,只支持为False,表示没有偏置参数。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str,可选):将激活应用于输出的 group normalization。 + - **data_layout** (str,可选):指定输入的数据格式,输出的数据格式将与输入保持一致,可以是"NCHW"和"NHWC"。N是批尺寸,C是通道数,H是特征高度,W是特征宽度。默认值:"NCHW"。 + - **name** (str,可选):具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + +返回:4-D Tensor,数据类型和格式与 `input` 一致。 + +返回类型:Variable + +抛出异常: + - ``ValueError`` - 如果 ``data_layout`` 既不是"NCHW"也不是"NHWC"。 + - ``ValueError`` - 如果 ``groups`` 小于1,或者 ``groups`` 大于输入的通道数。 + - ``ShapeError`` - 如果 ``param_attr`` (Scale) 或者 ``bias_attr`` (Bias) 不是 1-D Tensor。 + - ``ShapeError`` - 如果 ``param_attr`` (Scale) 或者 ``bias_attr`` (Bias) 的大小与输入的通道数不相等。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + data = fluid.data(name='data', shape=[None, 8, 32, 32], dtype='float32') + x = fluid.layers.group_norm(input=data, groups=4) + + + + + + + + + + diff --git a/doc/paddle/api/paddle/static/nn/hsigmoid_cn.rst b/doc/paddle/api/paddle/static/nn/hsigmoid_cn.rst new file mode 100644 index 000000000..fa8a9704f --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/hsigmoid_cn.rst @@ -0,0 +1,60 @@ +.. _cn_api_fluid_layers_hsigmoid: + +hsigmoid +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None, name=None, path_table=None, path_code=None, is_custom=False, is_sparse=False) + +:api_attr: 声明式编程模式(静态图) + + + +层次sigmoid(hierarchical sigmoid),该OP通过构建一个分类二叉树来降低计算复杂度,主要用于加速语言模型的训练过程。 + +该OP建立的二叉树中每个叶节点表示一个类别(单词),每个非叶子节点代表一个二类别分类器(sigmoid)。对于每个类别(单词),都有一个从根节点到它的唯一路径,hsigmoid累加这条路径上每个非叶子节点的损失得到总损失。 + +相较于传统softmax的计算复杂度 :math:`O(N)` ,hsigmoid可以将计算复杂度降至 :math:`O(logN)` ,其中 :math:`N` 表示类别总数(字典大小)。 + +若使用默认树结构,请参考 `Hierarchical Probabilistic Neural Network Language Model `_ 。 + +若使用自定义树结构,请将参数 ``is_custom`` 设置为True,并完成以下步骤(以语言模型为例): + +1. 使用自定义词典来建立二叉树,每个叶结点都应该是词典中的单词; + +2. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径** 的映射,即路径表 ``path_table`` 参数; + +3. 建立一个dict类型数据结构,用于存储 **单词id -> 该单词叶结点至根节点路径的编码** 的映射,即路径编码 ``path_code`` 参数。 编码是指每次二分类的标签,1为真,0为假; + +4. 每个单词都已经有自己的路径和路径编码,当对于同一批输入进行操作时,可以同时传入一批路径和路径编码进行运算。 + +参数: + - **input** (Variable) - 输入Tensor。数据类型为float32或float64,形状为 ``[N, D]`` ,其中 ``N`` 为minibatch的大小,``D`` 为特征大小。 + - **label** (Variable) - 训练数据的标签。数据类型为int64,形状为 ``[N, 1]`` 。 + - **num_classes** (int) - 类别总数(字典大小)必须大于等于2。若使用默认树结构,即当 ``is_custom=False`` 时 ,必须设置该参数。若使用自定义树结构,即当 ``is_custom=True`` 时,它取值应为自定义树结构的非叶节点的个数,用于指定二分类的类别总数。 + - **param_attr** (ParamAttr,可选) - 该OP可学习参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``param_attr`` 属性来创建ParamAttr实例。如果没有设置 ``param_attr`` 的初始化函数,那么参数将采用Xavier初始化。默认值为None。 + - **bias_attr** (ParamAttr, 可选) - 该OP的偏置参数的属性。可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 该OP将利用 ``bias_attr`` 属性来创建ParamAttr实例。如果没有设置 ``bias_attr`` 的初始化函数,参数初始化为0.0。默认值为None。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **path_table** (Variable,可选) – 存储每一批样本从类别(单词)到根节点的路径,按照从叶至根方向存储。 数据类型为int64,形状为 ``[N, L]`` ,其中L为路径长度。``path_table`` 和 ``path_code`` 应具有相同的形状, 对于每个样本i,path_table[i]为一个类似np.ndarray的结构,该数组内的每个元素都是其双亲结点权重矩阵的索引。默认值为None。 + - **path_code** (Variable,可选) – 
存储每一批样本从类别(单词)到根节点的路径编码,按从叶至根方向存储。数据类型为int64,形状为 ``[N, L]``。默认值为None。 + - **is_custom** (bool,可选) – 是否使用用户自定义二叉树取代默认二叉树结构。如果设置为True,请务必设置 ``path_table`` , ``path_code`` , ``num_classes`` ,否则必须设置num_classes。默认值为False。 + - **is_sparse** (bool,可选) – 是否使用稀疏更新方式。如果设置为True,W的梯度和输入梯度将会变得稀疏。默认值为False。 + +返回: 层次sigmoid计算后的Tensor,形状为[N, 1],数据类型和 ``input`` 一致。 + +返回类型: Variable + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.fill_constant(shape=[4, 3], value=0.9, dtype='float32') + # x = [[0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9], [0.9, 0.9, 0.9]] + y = fluid.layers.fill_constant( + shape=[4, 1], value=1, dtype='int64') + # y = [[1], [1], [1], [1]] + out = fluid.layers.hsigmoid(input=x, label=y, num_classes=2, param_attr=fluid.initializer.Constant( + value=0.05), bias_attr=fluid.initializer.Constant(value=.0)) + # out = [[0.62792355], [0.62792355], [0.62792355], [0.62792355]] diff --git a/doc/paddle/api/paddle/static/nn/instance_norm_cn.rst b/doc/paddle/api/paddle/static/nn/instance_norm_cn.rst new file mode 100644 index 000000000..a10ff9390 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/instance_norm_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_instance_norm: + +instance_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.instance_norm(input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None) + +:api_attr: 声明式编程模式(静态图) + + + + +可用作卷积和全连接操作的实例正则化函数,根据每个样本的每个通道的均值和方差信息进行正则化。该层需要的数据格式如下: + +NCHW[batch,in_channels,in_height,in_width] + +更多详情请参考 : `Instance Normalization: The Missing Ingredient for Fast Stylization `_ + +``input`` 是mini-batch的输入。 + +.. 
math:: + \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \quad &// mean of each channel in each sample in a batch \\ + \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \quad &// variance of each channel in each sample a batch \\ + \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \quad &// normalize \\ + y_i &\gets \gamma \hat{x_i} + \beta \quad &// scale-and-shift + + +参数: + - **input** (Variable) - instance_norm算子的输入特征,是一个Variable类型,输入的维度可以为 2, 3, 4, 5。数据类型:float32和float64。 + - **epsilon** (float,默认1e-05)-为了当前输入做标准化时得到稳定的结果而加在的分母上的扰动值。默认值为1e-5。 + - **param_attr** (ParamAttr|None) - instance_norm 权重参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。 如果设为None,则默认的参数初始化为1.0。如果在ParamAttr指定了属性时, instance_norm创建相应属性的param_attr(权重)参数。默认:None。 + - **bias_attr** (ParamAttr|None) - instance_norm 偏置参数的属性,可以设置为None或者一个ParamAttr的类(ParamAttr中可以指定参数的各种属性)。如果设为None,默认的参数初始化为0.0。如果在ParamAttr指定了参数的属性时, instance_norm创建相应属性的bias_attr(偏置)参数。默认:None。 + - **name** (string,默认None)- 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量,在输入中运用instance normalization后的结果 + +返回类型:变量(Variable) + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32', append_batch_size=False) + hidden1 = fluid.layers.fc(input=x, size=200) + param_attr = fluid.ParamAttr(name='instance_norm_w', initializer=fluid.initializer.Constant(value=1.0)) + bias_attr = fluid.ParamAttr(name='instance_norm_b', initializer=fluid.initializer.Constant(value=0.0)) + hidden2 = fluid.layers.instance_norm(input=hidden1, param_attr = param_attr, bias_attr = bias_attr) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(3, 7, 3, 7)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden2]) + print(output) + diff --git a/doc/paddle/api/paddle/static/nn/layer_norm_cn.rst b/doc/paddle/api/paddle/static/nn/layer_norm_cn.rst new file mode 100644 index 000000000..477b6e6ba --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/layer_norm_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_layer_norm: + +layer_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.layer_norm(input, scale=True, shift=True, begin_norm_axis=1, epsilon=1e-05, param_attr=None, bias_attr=None, act=None, name=None) + +:api_attr: 声明式编程模式(静态图) + + + +该OP实现了层归一化层(Layer Normalization Layer),其可以应用于小批量输入数据。更多详情请参考:`Layer Normalization `_ + +计算公式如下 + +.. 
math:: + \\\mu=\frac{1}{H}\sum_{i=1}^{H}x_i\\ + + \\\sigma=\sqrt{\frac{1}{H}\sum_i^H{(x_i-\mu)^2} + \epsilon}\\ + + \\y=f(\frac{g}{\sigma}(x-\mu) + b)\\ + +- :math:`x` : 该层神经元的向量表示 +- :math:`H` : 层中隐藏神经元个数 +- :math:`\epsilon` : 添加较小的值到方差中以防止除零 +- :math:`g` : 可训练的比例参数 +- :math:`b` : 可训练的偏差参数 + +参数: + - **input** (Variable) - 维度为任意维度的多维 ``Tensor`` ,数据类型为float32或float64。 + - **scale** (bool, 可选) - 指明是否在归一化后学习自适应增益 ``g`` 。默认值:True。 + - **shift** (bool, 可选) - 指明是否在归一化后学习自适应偏差 ``b`` 。默认值:True。 + - **begin_norm_axis** (int, 可选) - 指明归一化将沿着 ``begin_norm_axis`` 到 ``rank(input)`` 的维度执行。默认值:1。 + - **epsilon** (float, 可选) - 指明在计算过程中是否添加较小的值到方差中以防止除零。默认值:1e-05。 + - **param_attr** (ParamAttr, 可选) - 指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr, 可选) - 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **act** (str, 可选) - 应用于输出上的激活函数,如tanh、softmax、sigmoid,relu等,支持列表请参考 :ref:`api_guide_activations` ,默认值为None。 + - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。。 + +返回:表示归一化结果的 ``Tensor`` ,数据类型和 ``input`` 一致,返回维度和 ``input`` 一致。 + +返回类型:Variable + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + import numpy as np + x = fluid.layers.data(name='x', shape=[3, 32, 32], dtype='float32') + hidden1 = fluid.layers.layer_norm(input=x, begin_norm_axis=1) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + np_x = np.random.random(size=(8, 3, 32, 32)).astype('float32') + output = exe.run(feed={"x": np_x}, fetch_list = [hidden1]) + print(output) + diff --git a/doc/paddle/api/paddle/static/nn/multi_box_head_cn.rst b/doc/paddle/api/paddle/static/nn/multi_box_head_cn.rst new file mode 100644 index 000000000..a8fb7337f --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/multi_box_head_cn.rst @@ -0,0 +1,112 @@ +.. _cn_api_fluid_layers_multi_box_head: + +multi_box_head +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.multi_box_head(inputs, image, base_size, num_classes, aspect_ratios, min_ratio=None, max_ratio=None, min_sizes=None, max_sizes=None, steps=None, step_w=None, step_h=None, offset=0.5, variance=[0.1, 0.1, 0.2, 0.2], flip=True, clip=False, kernel_size=1, pad=0, stride=1, name=None, min_max_aspect_ratios_order=False) + +:api_attr: 声明式编程模式(静态图) + + + +基于SSD(Single Shot MultiBox Detector)算法,在不同层输入特征上提取先验框、计算回归的坐标位置和分类的置信度,并合并到一起作为输出,具体参数解释和输出格式参考下面说明。更详细信息,请参阅SSD论文 `SSD:Single Shot MultiBox Detector `_ 的2.2节。 + +参数: + - **inputs** (list(Variable) | tuple(Variable)) - 输入特征的列表,仅支持格式为NCHW的4-D Tensor。 + - **image** (Variable) - 一般是网络输入的图像数据,仅支持NCHW格式。 + - **base_size** (int) - 输入图片的大小,当输入个数len(inputs) > 2,并且 ``min_size`` 和 ``max_size`` 为None时,通过 ``base_size``, ``min_ratio`` 和 ``max_ratio`` 来计算出 ``min_size`` 和 ``max_size`` 。计算公式如下: + + .. code-block:: python + + min_sizes = [] + max_sizes = [] + step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) + for ratio in six.moves.range(min_ratio, max_ratio + 1, step): + min_sizes.append(base_size * ratio / 100.) + max_sizes.append(base_size * (ratio + step) / 100.) 
+ min_sizes = [base_size * .10] + min_sizes + max_sizes = [base_size * .20] + max_sizes + + - **num_classes** (int) - 类别数。 + - **aspect_ratios** (list(float) | tuple(float) | list(list(float)) | tuple(tuple(float))) - 候选框的宽高比, ``aspect_ratios`` 和 ``input`` 的个数必须相等。如果每个特征层提取先验框的 ``aspect_ratio`` 多于一个,写成嵌套的list,例如[[2., 3.]]。 + - **min_ratio** (int)- 先验框的长度和 ``base_size`` 的最小比率,注意,这里是百分比,假如比率为0.2,这里应该给20.0。默认值: None。 + - **max_ratio** (int)- 先验框的长度和 ``base_size`` 的最大比率,注意事项同 ``min_ratio`` 。默认值: None。 + - **min_sizes** (list(float) | tuple(float) | None)- 每层提取的先验框的最小长度,如果输入个数len(inputs)<= 2,则必须设置 ``min_sizes`` ,并且 ``min_sizes`` 的个数应等于len(inputs)。默认值:None。 + - **max_sizes** (list | tuple | None)- 每层提取的先验框的最大长度,如果len(inputs)<= 2,则必须设置 ``max_sizes`` ,并且 ``min_sizes`` 的长度应等于len(inputs)。默认值:None。 + - **steps** (list(float) | tuple(float)) - 相邻先验框的中心点步长 ,如果在水平和垂直方向上步长相同,则设置steps即可,否则分别通过step_w和step_h设置不同方向的步长。如果 ``steps``, ``step_w`` 和 ``step_h`` 均为None,步长为输入图片的大小 ``base_size`` 和特征图大小的比例。默认值:None。 + - **step_w** (list(float)| tuple(float)) - 水平方向上先验框中心点步长。默认值:None。 + - **step_h** (list | tuple) - 垂直方向上先验框中心点步长。默认值:None。 + - **offset** (float) - 左上角先验框中心在水平和垂直方向上的偏移。默认值:0.5 + - **variance** (list | tuple) - 先验框的方差。默认值:[0.1,0.1,0.2,0.2]。 + - **flip** (bool) - 是否翻转宽高比。默认值:True。 + - **clip** (bool) - 是否剪切超出边界的框。默认值:False。 + - **kernel_size** (int) - 计算回归位置和分类置信度的卷积核的大小。默认值:1。 + - **pad** (int | list(int) | tuple(int)) - 计算回归位置和分类置信度的卷积核的填充。默认值:0。 + - **stride** (int | list | tuple) - 计算回归位置和分类置信度的卷积核的步长。默认值:1。 + - **name** (str) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **min_max_aspect_ratios_order** (bool) - 如果设置为True,则输出先验框的顺序为[min,max,aspect_ratios],这与Caffe一致。请注意,此顺序会影响卷积层后面的权重顺序,但不会影响最终检测结果。默认值:False。 + +返回: + - **mbox_loc(Variable)** - 预测框的回归位置。格式为[N,num_priors,4],其中 ``N`` 是batch size, ``num_priors`` 是总共提取的先验框的个数。 + - **mbox_conf(Variable)** - 预测框的分类信度。格式为[N,num_priors,C],其中 ``num_priors`` 同上,C是类别数。 + - **boxes(Variable)** - 提取的先验框。布局是[num_priors,4], 
``num_priors`` 同上,常量4是坐标个数。 + - **variances(Variable)** - 提取的先验框方差。布局是[num_priors,4], ``num_priors`` 同上。 + +返回类型:list(Variable) | tuple(Variable) + +**代码示例1: 设置min_ratio和max_ratio** + +.. code-block:: python + + import paddle.fluid as fluid + + images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32') + conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32') + conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32') + conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32') + conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32') + conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32') + conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32') + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv6], + image=images, + num_classes=21, + min_ratio=20, + max_ratio=90, + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) + + +**代码示例2: 设置min_sizes和max_sizes** + +.. 
code-block:: python + + import paddle.fluid as fluid + + images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32') + conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32') + conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32') + conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32') + conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32') + conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32') + conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32') + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( + inputs=[conv1, conv2, conv3, conv4, conv5, conv6], + image=images, + num_classes=21, + min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0], + max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0], + aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]], + base_size=300, + offset=0.5, + flip=True, + clip=True) + + + diff --git a/doc/paddle/api/paddle/static/nn/nce_cn.rst b/doc/paddle/api/paddle/static/nn/nce_cn.rst new file mode 100644 index 000000000..ef4532b84 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/nce_cn.rst @@ -0,0 +1,78 @@ +.. _cn_api_fluid_layers_nce: + +nce +------------------------------- + + +.. 
py:function:: paddle.fluid.layers.nce(input, label, num_total_classes, sample_weight=None, param_attr=None, bias_attr=None, num_neg_samples=None, name=None, sampler='uniform', custom_dist=None, seed=0, is_sparse=False) + +:api_attr: 声明式编程模式(静态图) + + + +计算并返回噪音对比估计损失值( noise-contrastive estimation training loss)。 +请参考 `Noise-contrastive estimation: A new estimation principle for unnormalized statistical models +`_ +该层默认使用均匀分布进行抽样。 + +参数: + - **input** (Variable) - 输入变量, 2-D 张量,形状为 [batch_size, dim],数据类型为 float32 或者 float64。 + - **label** (Variable) - 标签,2-D 张量,形状为 [batch_size, num_true_class],数据类型为 int64。 + - **num_total_classes** (int) - 所有样本中的类别的总数。 + - **sample_weight** (Variable,可选) - 存储每个样本权重,shape 为 [batch_size, 1] 存储每个样本的权重。每个样本的默认权重为1.0。 + - **param_attr** (ParamAttr,可选) :指定权重参数属性的对象。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **bias_attr** (ParamAttr,可选) : 指定偏置参数属性的对象。默认值为None,表示使用默认的偏置参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **num_neg_samples** (int) - 负样例的数量,默认值是10。 + - **name** (str,可选) - 该layer的名称,具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **sampler** (str,可选) – 采样器,用于从负类别中进行取样。可以是 ``uniform``, ``log_uniform`` 或 ``custom_dist`` , 默认 ``uniform`` 。 + - **custom_dist** (nd.array, 可选) – 第0维的长度为 ``num_total_classes`` 。 如果采样器类别为 ``custom_dist`` ,则使用此参数。custom_dist[i] 是第i个类别被取样的概率。默认为 None + - **seed** (int,可选) – 采样器使用的seed。默认为0 + - **is_sparse** (bool,可选) – 标志位,指明是否使用稀疏更新, 为 ``True`` 时 :math:`weight@GRAD` 和 :math:`bias@GRAD` 的类型会变为 SelectedRows。默认为 ``False`` 。 + +返回: nce loss,数据类型与 **input** 相同 + +返回类型: Variable + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + + window_size = 5 + words = [] + for i in range(window_size): + words.append(fluid.data( + name='word_{0}'.format(i), shape=[-1, 1], dtype='int64')) + + dict_size = 10000 + label_word = int(window_size / 2) + 1 + + embs = [] + for i in range(window_size): + if i == label_word: + continue + + emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32], + param_attr='embed', is_sparse=True) + embs.append(emb) + + embs = fluid.layers.concat(input=embs, axis=1) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=dict_size, param_attr='nce.w_0', + bias_attr='nce.b_0') + + #or use custom distribution + dist = np.array([0.05,0.5,0.1,0.3,0.05]) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=5, param_attr='nce.w_1', + bias_attr='nce.b_1', + num_neg_samples=3, + sampler="custom_dist", + custom_dist=dist) + + + + diff --git a/doc/paddle/api/paddle/static/nn/prelu_cn.rst b/doc/paddle/api/paddle/static/nn/prelu_cn.rst new file mode 100644 index 000000000..b1ea4cfb5 --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/prelu_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_layers_prelu: + +prelu +------------------------------- + + +.. py:function:: paddle.fluid.layers.prelu(x, mode, param_attr=None, name=None) + +:api_attr: 声明式编程模式(静态图) + + + +等式: + +.. math:: + y = max(0, x) + \alpha min(0, x) + +共提供三种激活方式: + +.. code-block:: text + + all: 所有元素使用同一个alpha值 + channel: 在同一个通道中的元素使用同一个alpha值 + element: 每一个元素有一个独立的alpha值 + + +参数: + - **x** (Variable)- 多维Tensor或LoDTensor,数据类型为float32。 + - **mode** (str) - 权重共享模式。 + - **param_attr** (ParamAttr,可选) - 可学习权重 :math:`[\alpha]` 的参数属性,可由ParamAttr创建。默认值为None,表示使用默认的权重参数属性。具体用法请参见 :ref:`cn_api_fluid_ParamAttr` 。 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + + +返回: 表示激活输出Tensor或LoDTensor,数据类型为float32。与输入形状相同。 + + +返回类型:Variable + + +**代码示例:** + +.. 
code-block:: python + + import paddle.fluid as fluid + from paddle.fluid.param_attr import ParamAttr + x = fluid.data(name="x", shape=[None,5,10,10], dtype="float32") + mode = 'channel' + output = fluid.layers.prelu( + x,mode,param_attr=ParamAttr(name='alpha')) + + diff --git a/doc/paddle/api/paddle/static/nn/row_conv_cn.rst b/doc/paddle/api/paddle/static/nn/row_conv_cn.rst new file mode 100644 index 000000000..ab16bdbfb --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/row_conv_cn.rst @@ -0,0 +1,59 @@ +.. _cn_api_fluid_layers_row_conv: + +row_conv +------------------------------- + + +.. py:function:: paddle.fluid.layers.row_conv(input, future_context_size, param_attr=None, act=None) + +:api_attr: 声明式编程模式(静态图) + + + +该接口为行卷积(Row-convolution operator)或称之为超前卷积(lookahead convolution),最早介绍于DeepSpeech2论文中,论文链接: + + ``_ + +双向的RNN在深度语音模型中很有用,它通过对整个序列执行正向和反向传递来学习序列的表示。然而,与单向RNNs不同的是,在线部署和低延迟设置中,双向RNNs具有难度。超前卷积将来自未来子序列的信息以一种高效的方式进行计算,以改进单向递归神经网络。 row convolution operator 与一维序列卷积不同,计算方法如下: + +给定输入序列长度为 :math:`t` 的输入序列 :math:`X` 和输入维度 :math:`D` ,以及一个大小为 :math:`context * D` 的滤波器 :math:`W` ,输出序列卷积为: + +.. math:: + out_i = \sum_{j=i}^{i+context-1} X_{j} · W_{j-i} + +公式中: + - :math:`out_i` : 第i行输出变量形为[1, D]. + - :math:`context` : 下文(future context)大小 + - :math:`X_j` : 第j行输出变量,形为[1,D] + - :math:`W_{j-i}` : 第(j-i)行参数,其形状为[1,D]。 + +详细请参考 `设计文档 `_ 。 + +参数: + - **input** (Variable) -- 支持输入为LodTensor和Tensor,输入类型可以是[float32, float64],它支持可变时间长度的输入序列。当输入input为LodTensor时,其内部张量是一个具有形状(T x N)的矩阵,其中T是这个mini batch中的总的timestep,N是输入数据维数。当输入input为Tensor时,其形状为(B x T x N)的三维矩阵,B为mini batch大小,T为每个batch输入中的最大timestep,N是输入数据维数。当输入input为LoDTensor,形状为[9, N],LoD信息为[2, 3, 4],等价于输入input为形状是[3, 4, N]的Tensor。 + - **future_context_size** (int) -- 下文大小。请注意,卷积核的shape是[future_context_size + 1, N],N和输入input的数据维度N保持一致。 + - **param_attr** (ParamAttr) -- 参数的属性,包括名称、初始化器等。 + - **act** (str) -- 非线性激活函数。 + +返回:表示row_conv计算结果的Variable,数据类型、维度和输入input相同。 + + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + # LoDTensor input + x = fluid.layers.data(name='x', shape=[9, 16], + dtype='float32', lod_level=3, + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + # Tensor input + x = fluid.layers.data(name='x', shape=[9, 4, 16], + dtype='float32', + append_batch_size=False) + out = fluid.layers.row_conv(input=x, future_context_size=2) + + + diff --git a/doc/paddle/api/paddle/static/nn/spectral_norm_cn.rst b/doc/paddle/api/paddle/static/nn/spectral_norm_cn.rst new file mode 100644 index 000000000..9ca240e3c --- /dev/null +++ b/doc/paddle/api/paddle/static/nn/spectral_norm_cn.rst @@ -0,0 +1,57 @@ +.. _cn_api_fluid_layers_spectral_norm: + +spectral_norm +------------------------------- + + +.. py:function:: paddle.fluid.layers.spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None) + +:api_attr: 声明式编程模式(静态图) + + + +**Spectral Normalization Layer** + +该OP用于计算了fc、conv1d、conv2d、conv3d层的权重参数的谱正则值,输入权重参数应分别为2-D, 3-D, 4-D, 5-D张量,输出张量与输入张量shape相同。谱特征值计算方式如下。 + +步骤1:生成形状为[H]的向量U,以及形状为[W]的向量V,其中H是输入权重张量的第 ``dim`` 个维度,W是剩余维度的乘积。 + +步骤2: ``power_iters`` 应该是一个正整数,用U和V迭代计算 ``power_iters`` 轮,迭代步骤如下。 + +.. math:: + + \mathbf{v} &:= \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}\\ + \mathbf{u} &:= \frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2} + +步骤3:计算 :math:`\sigma(\mathbf{W})` 并特征值值归一化。 + +.. 
math:: + \sigma(\mathbf{W}) &= \mathbf{u}^{T} \mathbf{W} \mathbf{v}\\ + \mathbf{W} &= \frac{\mathbf{W}}{\sigma(\mathbf{W})} + +可参考: `Spectral Normalization `_ + +参数: + - **weight** (Variable) - spectral_norm算子的输入权重张量,可以是2-D, 3-D, 4-D, 5-D Tensor,它是fc、conv1d、conv2d、conv3d层的权重,数据类型为float32或float64。 + - **dim** (int) - 将输入(weight)重塑为矩阵之前应排列到第一个的维度索引,如果input(weight)是fc层的权重,则应设置为0;如果input(weight)是conv层的权重,则应设置为1,默认为0。 + - **power_iters** (int) - 将用于计算spectral norm的功率迭代次数,默认值1 + - **eps** (float) - epsilon用于保证计算规范中的数值稳定性,分母会加上 ``eps`` 防止除零,默认1e-12 + - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值:None。 + +返回:Variable(Tensor) 谱正则化后权重张量,维度与输入 ``weight`` 一致。 + +返回类型:变量(Variable),数据类型与输入 ``weight`` 一致。 + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + + weight = fluid.data(name='weight', shape=[2, 8, 32, 32], dtype='float32') + x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2) + + + + + diff --git a/doc/paddle/api/paddle/static/save_cn.rst b/doc/paddle/api/paddle/static/save_cn.rst new file mode 100644 index 000000000..a15b23f2b --- /dev/null +++ b/doc/paddle/api/paddle/static/save_cn.rst @@ -0,0 +1,51 @@ +.. _cn_api_fluid_save: + +save +------------------------------- + + +.. py:function:: paddle.fluid.save(program, model_path) + +:api_attr: 声明式编程模式(静态图) +:alias_main: paddle.save +:alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save +:old_api: paddle.fluid.save + + + +该接口将传入的参数、优化器信息和网络描述保存到 ``model_path`` 。 + +参数包含所有的可训练 :ref:`cn_api_fluid_Variable` ,将保存到后缀为 ``.pdparams`` 的文件中。 + +优化器信息包含优化器使用的所有变量。对于Adam优化器,包含beta1、beta2、momentum等。 +所有信息将保存到后缀为 ``.pdopt`` 的文件中。(如果优化器没有需要保存的变量(如sgd),则不会生成)。 + +网络描述是程序的描述。它只用于部署。描述将保存到后缀为 ``.pdmodel`` 的文件中。 + +参数: + - **program** ( :ref:`cn_api_fluid_Program` ) – 要保存的Program。 + - **model_path** (str) – 保存program的文件前缀。格式为 ``目录名称/文件前缀``。如果文件前缀为空字符串,会引发异常。 + +返回: 无 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + + x = fluid.data(name="x", shape=[10, 10], dtype='float32') + y = fluid.layers.fc(x, 10) + z = fluid.layers.fc(y, 10) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + fluid.save(fluid.default_main_program(), "./test_path") + + + + + + + diff --git a/doc/paddle/api/paddle/static/scope_guard_cn.rst b/doc/paddle/api/paddle/static/scope_guard_cn.rst index e220cd8d4..df0566e1b 100644 --- a/doc/paddle/api/paddle/static/scope_guard_cn.rst +++ b/doc/paddle/api/paddle/static/scope_guard_cn.rst @@ -1,10 +1,10 @@ -.. _cn_api_fluid_executor_scope_guard: +.. _cn_api_fluid_scope_guard: scope_guard ------------------------------- -.. py:function:: paddle.fluid.executor.scope_guard (scope) +.. py:function:: paddle.fluid.scope_guard(scope) :api_attr: 声明式编程模式(静态图) diff --git a/doc/paddle/api/paddle/fluid/layers/strided_slice_cn.rst b/doc/paddle/api/paddle/strided_slice_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/strided_slice_cn.rst rename to doc/paddle/api/paddle/strided_slice_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/sums_cn.rst b/doc/paddle/api/paddle/sums_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/sums_cn.rst rename to doc/paddle/api/paddle/sums_cn.rst diff --git a/doc/paddle/api/paddle/tanh_cn.rst b/doc/paddle/api/paddle/tanh_cn.rst new file mode 100644 index 000000000..c5fbd2124 --- /dev/null +++ b/doc/paddle/api/paddle/tanh_cn.rst @@ -0,0 +1,46 @@ +.. _cn_api_fluid_layers_tanh: + +tanh +------------------------------- + +.. py:function:: paddle.fluid.layers.tanh(x, name=None) + + + + + +tanh 激活函数 + +.. math:: + out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} + + +参数: + + - **x** - Tanh算子的输入 + - **name** (str|None) - 该层名称(可选)。若设为None,则自动为该层命名。 + +返回: 张量(Tensor) + +返回类型: 变量(Variable) + +**代码示例**: + +.. 
code-block:: python + + import paddle.fluid as fluid + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.tanh(data) + + + + + + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/creation/ones_cn.rst b/doc/paddle/api/paddle/tensor/creation/ones_cn.rst index fa13c3412..32a6b2a57 100644 --- a/doc/paddle/api/paddle/tensor/creation/ones_cn.rst +++ b/doc/paddle/api/paddle/tensor/creation/ones_cn.rst @@ -1,46 +1,26 @@ -.. _cn_api_tensor_ones: +.. _cn_api_fluid_layers_ones: ones ------------------------------- -.. py:function:: paddle.ones(shape, dtype=None) - - +.. py:function:: paddle.fluid.layers.ones(shape,dtype,force_cpu=False) 该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为1的Tensor。 参数: - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 - - **dtype** (np.dtype|core.VarDesc.VarType|str, 可选) - 输出Tensor的数据类型,数据类型必须为bool、 float16、float32、float64、int32或int64。如果 ``dtype`` 为None,默认数据类型为float32。 - - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) – 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 返回:值全为1的Tensor,数据类型和 ``dtype`` 定义的类型一致。 - 抛出异常: - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 - - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor的时, 当 ``shape`` 为Tensor时,其数据类型不是int32或者int64。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时, 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 **代码示例**: .. code-block:: python - import paddle - - paddle.enable_imperative() - - #default dtype for ones OP - data1 = paddle.ones(shape=[3, 2]) - # [[1. 1.] - # [1. 1.] - # [1. 
1.]] - data2 = paddle.ones(shape=[2, 2], dtype='int32') - # [[1 1] - # [1 1]] - - #attr shape is a Variable Tensor - shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) - data3 = paddle.ones(shape=shape, dtype='int32') - # [[1 1] - # [1 1]] - + import paddle.fluid as fluid + data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]] diff --git a/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst b/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst index 33b189c7a..5d1e6a897 100644 --- a/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst +++ b/doc/paddle/api/paddle/tensor/creation/ones_like_cn.rst @@ -1,39 +1,34 @@ -.. _cn_api_tensor_ones_like: +.. _cn_api_fluid_layers_ones_like: ones_like ------------------------------- -.. py:function:: paddle.ones_like(x, dtype=None, name=None) +.. py:function:: paddle.fluid.layers.ones_like(x, out=None) -:alias_main: paddle.ones_like -:alias: paddle.tensor.ones_like, paddle.tensor.creation.ones_like -该OP返回一个和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 -参数 -:::::::::: - - **x** (Tensor) – 输入的Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 - - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. - - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - -返回 -:::::::::: - Tensor:和 ``x`` 具有相同形状的数值都为1的Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 -抛出异常 -:::::::::: - - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 +ones_like + +该功能创建一个形状与类型与x相似的张量,初始值为1。 + + +参数: + - **x** (Variable) - 指定形状与数据类型的输入张量 + - **out** (Variable)-输出张量 -代码示例 -:::::::::: +返回:输出张量 + +返回类型:变量(Variable) + +**代码示例**: .. 
code-block:: python - import paddle - import numpy as np + import paddle.fluid as fluid + + x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False) + data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0] + - paddle.enable_imperative() - x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) - out1 = paddle.ones_like(x) # [1., 1., 1.] - out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1] diff --git a/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst b/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst index cf3b97866..7206c16ac 100644 --- a/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst +++ b/doc/paddle/api/paddle/tensor/creation/zeros_cn.rst @@ -1,42 +1,26 @@ -.. _cn_api_tensor_zeros: +.. _cn_api_fluid_layers_zeros: zeros ------------------------------- -.. py:function:: paddle.zeros(shape, dtype=None, name=None) - - +.. py:function:: paddle.fluid.layers.zeros(shape,dtype,force_cpu=False) 该OP创建形状为 ``shape`` 、数据类型为 ``dtype`` 且值全为0的Tensor。 参数: - **shape** (tuple|list|Tensor) - 输出Tensor的形状, ``shape`` 的数据类型为int32或者int64。 - - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为bool、float16、float32、float64、int32或int64。 + - **dtype** (np.dtype|core.VarDesc.VarType|str) - 输出Tensor的数据类型,数据类型必须为float16、float32、float64、int32或int64。 + - **force_cpu** (bool, 可选) - 是否强制将输出Tensor写入CPU内存。如果 ``force_cpu`` 为False,则将输出Tensor写入当前所在运算设备的内存,默认为False。 返回:值全为0的Tensor,数据类型和 ``dtype`` 定义的类型一致。 抛出异常: - ``TypeError`` - 当 ``dtype`` 不是bool、 float16、float32、float64、int32、int64和None时。 - - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时, 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 + - ``TypeError`` - 当 ``shape`` 不是tuple、list、或者Tensor时。 当 ``shape`` 为Tensor,其数据类型不是int32或者int64时。 **代码示例**: .. code-block:: python - import paddle - paddle.enable_imperative() # Now we are in imperative mode - data = paddle.zeros(shape=[3, 2], dtype='float32') - # [[0. 0.] - # [0. 0.] - # [0. 0.]] - - data = paddle.zeros(shape=[2, 2]) - # [[0. 0.] - # [0. 
0.]] - - # shape is a Tensor - shape = paddle.fill_constant(shape=[2], dtype='int32', value=2) - data3 = paddle.ones(shape=shape, dtype='int32') - # [[0 0] - # [0 0]] - + import paddle.fluid as fluid + data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]] diff --git a/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst b/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst index 7cdacfeb4..d75dc7cd1 100644 --- a/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst +++ b/doc/paddle/api/paddle/tensor/creation/zeros_like_cn.rst @@ -1,40 +1,29 @@ -.. _cn_api_tensor_zeros_like: +.. _cn_api_fluid_layers_zeros_like: zeros_like ------------------------------- -.. py:function:: paddle.zeros_like(x, dtype=None, name=None) +.. py:function:: paddle.fluid.layers.zeros_like(x, out=None) -:alias_main: paddle.zeros_like -:alias: paddle.tensor.zeros_like, paddle.tensor.creation.zeros_like -:update_api: paddle.fluid.layers.zeros_like -该OP返回一个和 ``x`` 具有相同的形状的全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 -参数 -:::::::::: - - **x** (Tensor) – 输入的多维Tensor,数据类型可以是bool,float16, float32,float64,int32,int64。输出Tensor的形状和 ``x`` 相同。如果 ``dtype`` 为None,则输出Tensor的数据类型与 ``x`` 相同。 - - **dtype** (str|np.dtype|core.VarDesc.VarType, 可选) - 输出Tensor的数据类型,支持bool,float16, float32,float64,int32,int64。当该参数值为None时, 输出Tensor的数据类型与 ``x`` 相同。默认值为None. 
- - **name** (str, 可选) - 输出的名字。一般无需设置,默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 + + +该OP创建一个和x具有相同的形状和数据类型的全零Tensor。 + +参数: + - **x** (Variable) – 指定输入为一个多维的Tensor,数据类型可以是bool,float32,float64,int32,int64。 + - **out** (Variable|可选) – 如果为None,则创建一个Variable作为输出,创建后的Variable的数据类型,shape大小和输入变量x一致。如果是输入的一个Tensor,数据类型和数据shape大小需要和输入变量x一致。默认值为None。 -返回 -:::::::::: - Tensor:和 ``x`` 具有相同的形状全零Tensor,数据类型为 ``dtype`` 或者和 ``x`` 相同。 +返回:返回一个多维的Tensor,具体的元素值和输入的数据类型相关,如果是bool类型的,则全False,其它均为0。数据shape大小和输入x一致。 -抛出异常 -:::::::::: - - ``TypeError`` - 如果 ``dtype`` 不是bool、float16、float32、float64、int32、int64。 +返回类型:Variable -代码示例 -:::::::::: +**代码示例**: .. code-block:: python - import paddle - import numpy as np - - paddle.enable_imperative() + import paddle.fluid as fluid + x = fluid.data(name='x', dtype='float32', shape=[3]) + data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0] - x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) - out1 = paddle.zeros_like(x) # [0., 0., 0.] - out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0] diff --git a/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst index aa36dd238..46b1b3c3d 100644 --- a/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst +++ b/doc/paddle/api/paddle/tensor/manipulation/concat_cn.rst @@ -1,54 +1,51 @@ -.. _cn_api_tensor_concat: +.. _cn_api_fluid_layers_concat: concat ------------------------------- -.. py:function:: paddle.tensor.concat(x, axis=0, name=None) +.. 
py:function:: paddle.fluid.layers.concat(input, axis=0, name=None) 该OP对输入沿 ``axis`` 轴进行联结,返回一个新的Tensor。 参数: - - **x** (list|tuple) - 待联结的Tensor list或者Tensor tuple ,支持的数据类型为:bool, float16, float32、float64、int32、int64, ``x`` 中所有Tensor的数据类型应该一致。 - - **axis** (int|Tensor,可选) - 指定对输入 ``x`` 进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 中Tensor的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 + - **input** (list|tuple|Tensor) - 待联结的Tensor list,Tensor tuple或者Tensor,支持的数据类型为:bool、float16、 float32、float64、int32、int64。 ``input`` 中所有Tensor的数据类型必须一致。 + - **axis** (int|Tensor,可选) - 指定对输入Tensor进行运算的轴,可以是整数或者形状为[1]的Tensor,数据类型为int32或者int64。 ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 中Tensor 的维度, ``axis`` 为负值时与 :math:`axis + R` 等价。默认值为0。 - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 -返回:联结后的Tensor ,数据类型和 ``x`` 中的Tensor相同。 +返回:联结后的 ``Tensor`` ,数据类型和 ``input`` 中的Tensor相同。 + 抛出异常: - - ``TypeError``: - 当输入 ``x`` 的类型不是list或者tuple时。 - - ``TypeError``: - 当输入 ``x`` 的数据类型不是 bool,float16, float32, float64, int32, int64时。 - - ``TypeError``: - 当 ``axis`` 的类型不是int或者Tensor时。 当 ``axis`` 是Tensor的时候其数据类型不是int32或者int64时。 - - ``TypeError``: - 当输入 ``x`` 中的Tensor存在数据类型不一致时。 + - ``TypeError``: - 当输入 ``input`` 的类型不是list、tuple或者Tensor的时候。 + - ``TypeError``: - 当输入 ``input`` 的数据类型不是 bool,float16, float32, float64, int32, int64时。 + - ``TypeError``: - 当 ``axis`` 的类型不是int或者Tensor时。当 ``axis`` 是Tensor的时候其数据类型不是int32或者int64时。 + - ``TypeError``: - 当输入 ``input`` 中的Tensor存在数据类型不一致时。 **代码示例**: .. 
code-block:: python - - import paddle + + import paddle.fluid as fluid import numpy as np - - paddle.enable_imperative() # Now we are in imperative mode + in1 = np.array([[1, 2, 3], [4, 5, 6]]) in2 = np.array([[11, 12, 13], [14, 15, 16]]) in3 = np.array([[21, 22], [23, 24]]) - x1 = paddle.imperative.to_variable(in1) - x2 = paddle.imperative.to_variable(in2) - x3 = paddle.imperative.to_variable(in3) - zero = paddle.full(shape=[1], dtype='int32', fill_value=0) - # When the axis is negative, the real axis is (axis + Rank(x)) - # As follow, axis is -1, Rank(x) is 2, the real axis is 1 - out1 = paddle.concat(x=[x1, x2, x3], axis=-1) - out2 = paddle.concat(x=[x1, x2], axis=0) - out3 = paddle.concat(x=[x1, x2], axis=zero) - # out1 - # [[ 1 2 3 11 12 13 21 22] - # [ 4 5 6 14 15 16 23 24]] - # out2 out3 - # [[ 1 2 3] - # [ 4 5 6] - # [11 12 13] - # [14 15 16]] + with fluid.dygraph.guard(): + x1 = fluid.dygraph.to_variable(in1) + x2 = fluid.dygraph.to_variable(in2) + x3 = fluid.dygraph.to_variable(in3) + out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) + out2 = fluid.layers.concat(input=[x1, x2], axis=0) + print(out1.numpy()) + # [[ 1 2 3 11 12 13 21 22] + # [ 4 5 6 14 15 16 23 24]] + print(out2.numpy()) + # [[ 1 2 3] + # [ 4 5 6] + # [11 12 13] + # [14 15 16]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst new file mode 100644 index 000000000..3781c7c93 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/expand_as_cn.rst @@ -0,0 +1,68 @@ +.. _cn_api_fluid_layers_expand_as: + +expand_as +------------------------------- + +.. 
py:function:: paddle.fluid.layers.expand_as(x, target_tensor, name=None) + +:alias_main: paddle.expand_as +:alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as +:old_api: paddle.fluid.layers.expand_as + + + +该OP会根据输入的variable ``target_tensor`` 对输入 ``x`` 的各维度进行广播。通过 ``target_tensor``的维度来为 ``x`` 的每个维度设置广播的次数,使得x 的维度与target_tensor的维度相同。 ``x`` 的秩应小于等于6。注意, ``target_tensor`` 的秩必须与 ``x`` 的秩相同。 +注意:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。比如,target_tensor的维度为[2,6,2],x为[2,3,1],则整除后为[1,2,2],x广播后维度为[2,6,2]。如果target_tensor的维度为[2,5,2],第二维5不能整除x的第二维3,则会报错。 + +以下是一个示例: + +:: + + 输入(x) 是一个形状为[2, 3, 1]的 3-D Tensor : + + [ + [[1], [2], [3]], + [[4], [5], [6]] + ] + + target_tensor的维度 : [2, 6, 2] + + 输出(out) 是一个形状为[2, 6, 2]的 3-D Tensor: + + [ + [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], + [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] + ] + + + +参数: + - **x** (Variable)- 维度最高为6的多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32`` 或 ``bool``。 + - **target_tensor** (list|tuple|Variable)- 数据类型为 ``float32``,``float64``,``int32`` 或 ``bool`` 。可为Tensor或者LODTensor。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。返回值的每个维度的大小等于``target_tensor`` 对应的维度的大小。 + +返回类型:``Variable`` 。 + +抛出异常: + - :code:`ValueError`:``target_tensor`` 对应的每一维必须能整除输入x中对应的维度,否则会报错。 + +**代码示例** + +.. 
code-block:: python + + import paddle.fluid as fluid + import numpy as np + data = fluid.data(name="data", shape=[-1,10], dtype='float64') + target_tensor = fluid.data(name="target_tensor", shape=[-1,20], dtype='float64') + result = fluid.layers.expand_as(x=data, target_tensor=target_tensor) + use_cuda = False + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + x = np.random.rand(3,10) + y = np.random.rand(3,20) + output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name]) + print(output[0].shape) + #(3,20) \ No newline at end of file diff --git a/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst new file mode 100644 index 000000000..6bd61b158 --- /dev/null +++ b/doc/paddle/api/paddle/tensor/manipulation/expand_cn.rst @@ -0,0 +1,71 @@ +.. _cn_api_fluid_layers_expand: + +expand +------------------------------- + +.. py:function:: paddle.fluid.layers.expand(x, expand_times, name=None) + +:alias_main: paddle.expand +:alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand +:old_api: paddle.fluid.layers.expand + + + +该OP会根据参数 ``expand_times`` 对输入 ``x`` 的各维度进行复制。通过参数 ``expand_times`` 来为 ``x`` 的每个维度设置复制次数。 ``x`` 的秩应小于等于6。注意, ``expand_times`` 的大小必须与 ``x`` 的秩相同。以下是一个用例: + +:: + + 输入(x) 是一个形状为[2, 3, 1]的 3-D Tensor : + + [ + [[1], [2], [3]], + [[4], [5], [6]] + ] + + 属性(expand_times): [1, 2, 2] + + 输出(out) 是一个形状为[2, 6, 2]的 3-D Tensor: + + [ + [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], + [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] + ] + +参数: + - **x** (Variable)- 维度最高为6的多维 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``,``float64``,``int32`` 或 ``bool``。 + - **expand_times** (list|tuple|Variable)- 数据类型是 ``int32`` 。如果 ``expand_times`` 的类型是 list 或 tuple,它的元素可以是整数或者形状为[1]的 ``Tensor`` 或 ``LoDTensor``。如果 ``expand_times`` 的类型是 ``Variable``,则是1-D ``Tensor`` 或 ``LoDTensor``。表示 ``x`` 
每一个维度被复制的次数。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 + +返回:维度与输入 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。返回值的每个维度的大小等于 ``x`` 的相应维度的大小乘以 ``expand_times`` 给出的相应值。 + +返回类型:``Variable`` 。 + +抛出异常: + - :code:`TypeError`:``expand_times`` 的类型应该是 list、tuple 或 Variable。 + - :code:`ValueError`:``expand_times`` 中的元素不能是负值。 + + +**代码示例** + +.. code-block:: python + + import paddle.fluid as fluid + + # example 1: + data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0) + expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2]) + # the shape of expanded_1 is [2, 6, 2]. + + # example 2: + data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3) + expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4) + expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times) + # the shape of expanded_2 is [48, 56]. + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst index 74bc19465..7e1f85a6c 100644 --- a/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst +++ b/doc/paddle/api/paddle/tensor/manipulation/gather_cn.rst @@ -1,41 +1,56 @@ -.. _cn_api_paddle_tensor_gather +.. _cn_api_fluid_layers_gather: + gather ------------------------------- -.. py:function:: paddle.tensor.gather(input, index, overwrite=True) +.. py:function:: paddle.fluid.layers.gather(input, index, overwrite=True) -:alias_main: paddle.gather -:alias: paddle.gather,paddle.tensor.gather,paddle.tensor.manipulation.gather -:update_api: paddle.fluid.layers.gather -根据索引 index 获取输入(input)的最外层维度的条目,并将它们拼接在一起。 +根据索引 ``index`` 获取输入(input)的最外层维度的条目,并将它们拼接在一起。 .. math:: Out=X[Index] -**参数**: +.. 
code-block:: text + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + Then: + + Out = [[3, 4], + [5, 6]] + + +参数: - **input** (Variable) - 输入, 秩 ``rank >= 1`` , 支持的数据类型包括 int32、int64、float32、float64 和 uint8 (CPU)、float16(GPU) 。 - **index** (Variable) - 索引,秩 ``rank = 1``, 数据类型为 int32 或 int64。 - **overwrite** (bool) - 具有相同索引时在反向更新梯度的模式。如果为 ``True`` ,则使用覆盖模式更新相同索引的梯度;如果为 ``False`` ,则使用累积模式更新相同索引的梯度。默认值为 ``True`` 。 -**返回**:和输入的秩相同的输出张量。 +返回:和输入的秩相同的输出张量。 -**返回类型**:Variable +返回类型:Variable -**代码示例**: +**代码示例** .. code-block:: python + + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[-1, 5], dtype='float32') + index = fluid.layers.data(name='index', shape=[-1, 1], dtype='int32') + output = fluid.layers.gather(x, index) + + + + + + + + - import paddle - import paddle.fluid as fluid - import numpy as np - with fluid.dygraph.guard(): - input_1 = np.array([[1,2,3],[4,5,6],[7,8,9]]) - index_1 = np.array([0,1]) - input = fluid.dygraph.to_variable(input_1) - index = fluid.dygraph.to_variable(index_1) - output = paddle.fluid.layers.gather(input, index) - # expected output: [[1, 2, 3],[4, 5, 6]] diff --git a/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst index 52f85fccc..ca1607ee5 100644 --- a/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst +++ b/doc/paddle/api/paddle/tensor/manipulation/split_cn.rst @@ -1,56 +1,65 @@ -.. _cn_api_paddle_tensor_split +.. _cn_api_fluid_layers_split: + split ------------------------------- -.. py:function:: paddle.tensor.split(x, num_or_sections, axis=0, name=None) +.. 
py:function:: paddle.fluid.layers.split(input, num_or_sections, dim=-1, name=None) + 该OP将输入Tensor分割成多个子Tensor。 -**参数**: - - **x** (Tensor) - 输入变量,数据类型为bool, float16, float32,float64,int32,int64的多维Tensor。 - - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。在list或tuple中,至多有一个元素值为-1,表示该值是由 ``x`` 的维度和其他 ``num_or_sections`` 中元素推断出来的。例如对一个维度为[4,6,6]Tensor的第三维进行分割时,指定 ``num_or_sections=[2,-1,1]`` ,输出的三个Tensor维度分别为:[4,6,2],[4,6,3],[4,6,1]。 - - **axis** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``axis < 0`` ,则划分的维度为 ``rank(x) + axis`` 。默认值为0。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 +参数: + - **input** (Tensor) - 输入变量,数据类型为bool, float16,float32,float64,int32,int64的多维Tensor。 + - **num_or_sections** (int|list|tuple) - 如果 ``num_or_sections`` 是一个整数,则表示Tensor平均划分为相同大小子Tensor的数量。如果 ``num_or_sections`` 是一个list或tuple,那么它的长度代表子Tensor的数量,它的元素可以是整数或者形状为[1]的Tensor,依次代表子Tensor需要分割成的维度的大小。list或tuple的长度不能超过输入Tensor待分割的维度的大小。至多有一个元素值为-1,-1表示该值是由 ``input`` 待分割的维度值和 ``num_or_sections`` 的剩余元素推断出来的。 + - **dim** (int|Tensor,可选) - 整数或者形状为[1]的Tensor,数据类型为int32或int64。表示需要分割的维度。如果 ``dim < 0`` ,则划分的维度为 ``rank(input) + dim`` 。默认值为-1。 + - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 返回:分割后的Tensor列表。 + 抛出异常: - - :code:`TypeError`:``x`` 的数据类型不是float16、float32、float64、int32或int64时 。 + - :code:`TypeError`:``input`` 的数据类型不是bool、float16、float32、float64、int32或int64时 。 - :code:`TypeError`:``num_or_sections`` 不是int、list 或 tuple时。 - - :code:`TypeError`:``axis`` 不是 int 或 Tensor时。当 ``axis`` 为Tensor,其数据类型不是int32或int64时。 + - :code:`TypeError`:``dim`` 不是 int 或 Tensor时。当 ``dim`` 为Tensor,其数据类型不是int32或int64时。 **代码示例**: .. 
code-block:: python - import numpy as np - import paddle - - paddle.enable_imperative() - # x is a Tensor which shape is [3, 9, 5] - x_np = np.random.random([3, 9, 5]).astype("int32") - x = paddle.imperative.to_variable(x_np) + import paddle.fluid as fluid - out0, out1, out22 = paddle.split(x, num_or_sections=3, axis=1) + # input is a Tensor which shape is [3, 9, 5] + input = fluid.data( + name="input", shape=[3, 9, 5], dtype="float32") + + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1) # out0.shape [3, 3, 5] # out1.shape [3, 3, 5] # out2.shape [3, 3, 5] - out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, 4], axis=1) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1) # out0.shape [3, 2, 5] # out1.shape [3, 3, 5] # out2.shape [3, 4, 5] - out0, out1, out2 = paddle.split(x, num_or_sections=[2, 3, -1], axis=1) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1) # out0.shape [3, 2, 5] # out1.shape [3, 3, 5] # out2.shape [3, 4, 5] - # axis is negative, the real axis is (rank(x) + axis) which real + # dim is negative, the real dim is (rank(input) + axis) which real # value is 1. - out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=-2) + out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2) # out0.shape [3, 3, 5] # out1.shape [3, 3, 5] # out2.shape [3, 3, 5] + + + + + + + + diff --git a/doc/paddle/api/paddle/tensor/manipulation/squeeze_cn.rst b/doc/paddle/api/paddle/tensor/manipulation/squeeze_cn.rst deleted file mode 100644 index 026f38455..000000000 --- a/doc/paddle/api/paddle/tensor/manipulation/squeeze_cn.rst +++ /dev/null @@ -1,66 +0,0 @@ -.. _cn_api_fluid_layers_squeeze: - -squeeze -------------------------------- - -.. py:function:: paddle.fluid.layers.squeeze(input, axes, name=None) - - - - -该OP会根据axes压缩输入Tensor的维度。如果指定了axes,则会删除axes中指定的维度,axes指定的维度要等于1。如果没有指定axes,那么所有等于1的维度都会被删除。 - -- 例1: - -.. 
code-block:: python - - 输入: - X.shape = [1,3,1,5] - axes = [0] - 输出; - Out.shape = [3,1,5] -- 例2: - -.. code-block:: python - - 输入: - X.shape = [1,3,1,5] - axes = [] - 输出: - Out.shape = [3,5] -- 例3: - -.. code-block:: python - - 输入: - X.shape = [1,3,1,5] - axes = [-2] - 输出: - Out.shape = [1,3,5] - -参数: - - **input** (Variable) - 输入任意维度的Tensor。 支持的数据类型:float32,float64,int8,int32,int64。 - - **axes** (list) - 输入一个或一列整数,代表要压缩的轴。axes的范围: :math:`[-rank(input), rank(input))` 。 axes为负数时, :math:`axes=axes+rank(input)` 。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 返回对维度进行压缩后的Tensor。数据类型与输入Tensor一致。 - -返回类型:Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.layers as layers - x = layers.data(name='x', shape=[5, 1, 10]) - y = layers.squeeze(input=x, axes=[1]) #y.shape=[5, 10] - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/abs_cn.rst b/doc/paddle/api/paddle/tensor/math/abs_cn.rst deleted file mode 100644 index cf726de9f..000000000 --- a/doc/paddle/api/paddle/tensor/math/abs_cn.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _cn_api_fluid_layers_abs: - -abs -------------------------------- - -.. py:function:: paddle.fluid.layers.abs(x, name=None) - -:alias_main: paddle.abs -:alias: paddle.abs,paddle.tensor.abs,paddle.tensor.math.abs -:old_api: paddle.fluid.layers.abs - - - -绝对值激活函数。 - -.. math:: - out = |x| - -参数: - - **x** (Variable)- 多维Tensor,数据类型为float32或float64。 - - **name** (str) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 - -返回:表示绝对值结果的Tensor,数据类型与x相同。 - -返回类型:Variable - -**代码示例**: - -.. 
code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[32, 784]) - result = fluid.layers.abs(data) diff --git a/doc/paddle/api/paddle/tensor/math/acos_cn.rst b/doc/paddle/api/paddle/tensor/math/acos_cn.rst deleted file mode 100644 index 9185569aa..000000000 --- a/doc/paddle/api/paddle/tensor/math/acos_cn.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _cn_api_fluid_layers_acos: - -acos -------------------------------- - -.. py:function:: paddle.fluid.layers.acos(x, name=None) - -:alias_main: paddle.acos -:alias: paddle.acos,paddle.tensor.acos,paddle.tensor.math.acos -:old_api: paddle.fluid.layers.acos - - - -arccosine激活函数。 - -.. math:: - out = cos^{-1}(x) - -参数: - - **x(Variable)** - acos的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 -返回: `acos` 的输出Tensor,数据类型与 `x` 相同。 - -返回类型: Variable - - - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[4]) - # if data is [-0.8183, 0.4912, -0.6444, 0.0371] - result = fluid.layers.acos(data) - # result is [2.5293, 1.0573, 2.2711, 1.5336] - - - diff --git a/doc/paddle/api/paddle/tensor/math/asin_cn.rst b/doc/paddle/api/paddle/tensor/math/asin_cn.rst deleted file mode 100644 index 03109d28e..000000000 --- a/doc/paddle/api/paddle/tensor/math/asin_cn.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _cn_api_fluid_layers_asin: - -asin -------------------------------- - -.. py:function:: paddle.fluid.layers.asin(x, name=None) - -:alias_main: paddle.asin -:alias: paddle.asin,paddle.tensor.asin,paddle.tensor.math.asin -:old_api: paddle.fluid.layers.asin - - - -arcsine激活函数。 - -.. math:: - out = sin^{-1}(x) - - -参数: - - **x(Variable)** - asin的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 - -返回: `asin` 的输出Tensor,数据类型与 `x` 相同。 - -返回类型: Variable - -**代码示例**: - -.. 
code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[4]) - # if data is [-0.8183, 0.4912, -0.6444, 0.0371] - result = fluid.layers.asin(data) - # result is [-0.9585, 0.5135, -0.7003, 0.0372] - - - diff --git a/doc/paddle/api/paddle/tensor/math/atan_cn.rst b/doc/paddle/api/paddle/tensor/math/atan_cn.rst deleted file mode 100644 index 382f6b09b..000000000 --- a/doc/paddle/api/paddle/tensor/math/atan_cn.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _cn_api_tensor_atan: - -atan -------------------------------- - -.. py:function:: paddle.atan(x, name=None, out=None) - -arctanh 激活函数。 - -.. math:: - out = tanh^{-1}(x) - -参数: - - **x(Variable)** - atan的输入Tensor,数据类型为 float32 或 float64 - - **name** (str|None) – 具体用法请参见 :ref:`cn_api_guide_Name` ,一般无需设置,默认值为None。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 - -返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 - -**代码示例**: - -.. code-block:: python - - import numpy as np - import paddle - import paddle.fluid as fluid - - inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = paddle.atan(inputs) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([-0.8183, 0.4912, -0.6444, 0.0371]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) - print(res) - #[array([-0.6858003, 0.45658287, -0.5724284, 0.03708299], dtype=float32)] diff --git a/doc/paddle/api/paddle/tensor/math/ceil_cn.rst b/doc/paddle/api/paddle/tensor/math/ceil_cn.rst deleted file mode 100644 index 27ca3dd54..000000000 --- a/doc/paddle/api/paddle/tensor/math/ceil_cn.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. _cn_api_fluid_layers_ceil: - -ceil -------------------------------- - -.. 
py:function:: paddle.fluid.layers.ceil(x, name=None) - -:alias_main: paddle.ceil -:alias: paddle.ceil,paddle.tensor.ceil,paddle.tensor.math.ceil -:old_api: paddle.fluid.layers.ceil - - - -向上取整运算函数。 - -.. math:: - out = \left \lceil x \right \rceil - - - -参数: - - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32或float64。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 - -返回: 输出为Tensor,与 ``x`` 维度相同、数据类型相同。 - -返回类型: Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - input_ceil = np.array([[-1.5,6],[1,15.6]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_ceil) - y = fluid.layers.ceil(x) - print(y.numpy()) - # [[-1. 6.] - # [ 1. 16.]] diff --git a/doc/paddle/api/paddle/tensor/math/cos_cn.rst b/doc/paddle/api/paddle/tensor/math/cos_cn.rst deleted file mode 100644 index 4f31c473c..000000000 --- a/doc/paddle/api/paddle/tensor/math/cos_cn.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. _cn_api_fluid_layers_cos: - -cos -------------------------------- - -.. py:function:: paddle.fluid.layers.cos(x, name=None) - -:alias_main: paddle.cos -:alias: paddle.cos,paddle.tensor.cos,paddle.tensor.math.cos -:old_api: paddle.fluid.layers.cos - - - -余弦函数。 - -.. math:: - - out = cos(x) - - - -参数: - - **x** (Variable) - 该OP的输入为多维Tensor,数据类型为float32,float64。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 - - -返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 - -返回类型:Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - input_cos = np.array([[-1,np.pi],[1,15.6]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_cos) - y = fluid.layers.cos(x) - print(y.numpy()) - # [[ 0.54030231 -1. 
] - # [ 0.54030231 -0.99417763]] diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_add_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_add_cn.rst deleted file mode 100644 index 7414eae16..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_add_cn.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_add: - -elementwise_add -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_add(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_add -:alias: paddle.elementwise_add,paddle.tensor.elementwise_add,paddle.tensor.math.elementwise_add -:old_api: paddle.fluid.layers.elementwise_add - - - -该OP是逐元素相加算子,输入 ``x`` 与输入 ``y`` 逐元素相加,并将各个位置的输出元素保存到返回结果中。 - -等式为: - -.. math:: - Out = X + Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. 
code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_add(x, y) - # z = x + y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # [3., 8., 6.] - -**代码示例 2** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_add(x, y, axis=1) - # z = x + y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[5], dtype='float32') - # z = x + y - z = fluid.layers.elementwise_add(x, y, axis=3) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - - - - diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_div_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_div_cn.rst deleted file mode 100644 index d4d12f36b..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_div_cn.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_div: - -elementwise_div -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_div(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_div -:alias: paddle.elementwise_div,paddle.tensor.elementwise_div,paddle.tensor.math.elementwise_div -:old_api: paddle.fluid.layers.elementwise_div - - - -该OP是逐元素相除算子,输入 ``x`` 与输入 ``y`` 逐元素相除,并将各个位置的输出元素保存到返回结果中。 - -等式是: - -.. 
math:: - Out = X / Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_div(x, y) - # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # [2., 0.6, 2.] - -**代码示例 2** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_div(x, y, axis=1) - # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_div(x, y, axis=3) - # z = x / y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - - - - diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_floordiv_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_floordiv_cn.rst deleted file mode 100644 index bd19626b6..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_floordiv_cn.rst +++ /dev/null @@ -1,116 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_floordiv: - -elementwise_floordiv -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_floordiv(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_floordiv -:alias: paddle.elementwise_floordiv,paddle.tensor.elementwise_floordiv,paddle.tensor.math.elementwise_floordiv -:old_api: paddle.fluid.layers.elementwise_floordiv - - - -该OP是逐元素整除算子,输入 ``x`` 与输入 ``y`` 逐元素整除,并将各个位置的输出元素保存到返回结果中。 - -等式为: - -.. 
math:: - Out = X // Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。如果为None则不添加激活函数。默认值为None。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - - x = fluid.data(name="x", shape=[3], dtype='int64') - y = fluid.data(name="y", shape=[3], dtype='int64') - z = fluid.layers.elementwise_floordiv(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - print(z_value) #[2,0,2] - -**代码示例 2** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), - "y": np.random.randint(1, 5, size=[3, 4]) - } - - x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') - y = fluid.data(name="y", shape=[3,4], dtype='int64') - z = fluid.layers.elementwise_floordiv(x, y, axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), - "y": np.random.randint(1, 5, size=[5]) - } - - x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') - y = fluid.data(name="y", shape=[5], dtype='int64') - z = fluid.layers.elementwise_floordiv(x, y, axis=3) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_mod_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_mod_cn.rst deleted file mode 100644 index 5e53e8379..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_mod_cn.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_mod: - -elementwise_mod -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_mod(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_mod -:alias: paddle.elementwise_mod,paddle.tensor.elementwise_mod,paddle.tensor.math.elementwise_mod -:old_api: paddle.fluid.layers.elementwise_mod - - - -该OP是逐元素取模算子,输入 ``x`` 与输入 ``y`` 逐元素取模,并将各个位置的输出元素保存到返回结果中。 - -等式为: - -.. math:: - Out = X \% Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 
用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。如果为None则不添加激活函数。默认值为None。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - - -**代码示例 1** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - - x = fluid.data(name="x", shape=[3], dtype='int64') - y = fluid.data(name="y", shape=[3], dtype='int64') - z = fluid.layers.elementwise_mod(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - print(z_value) #[0,3,0] - -**代码示例 2** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), - "y": np.random.randint(1, 5, size=[3, 4]) - } - - x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') - y = fluid.data(name="y", shape=[3,4], dtype='int64') - z = fluid.layers.elementwise_mod(x, y, axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]), - "y": np.random.randint(1, 5, size=[5]) - } - - x = fluid.data(name="x", shape=[2,3,4,5], dtype='int64') - y = fluid.data(name="y", shape=[5], dtype='int64') - z = fluid.layers.elementwise_mod(x, y, axis=3) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_pow_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_pow_cn.rst deleted file mode 100644 index 6f08b313a..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_pow_cn.rst +++ /dev/null @@ -1,84 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_pow: - -elementwise_pow -------------------------------- - -.. py:function:: paddle.fluid.layers.elementwise_pow(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_pow -:alias: paddle.elementwise_pow,paddle.tensor.elementwise_pow,paddle.tensor.math.elementwise_pow -:old_api: paddle.fluid.layers.elementwise_pow - - -该OP逐元素对输入Tensor进行幂操作。 - -等式是: - -.. math:: - Out = X ^ Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :多维Tensor。 - -此运算算子有两种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 
用 :math:`Y` 的 ``shape`` 匹配 :math:`X` 的 ``shape``,其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis = rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **y** (Variable)- 多维Tensor。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64`` 。 - - **axis** (int32, 可选)- Y的维度对应到X维度上时的索引。默认值为 -1。 - - **act** (string, 可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (string, 可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - -返回: 维度和数据类型与 ``x`` 相同的多维Tensor。 - -返回类型: 多维Tensor。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_pow(x, y) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - - print(z_value) #[2, 243, 16] - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/elementwise_sub_cn.rst b/doc/paddle/api/paddle/tensor/math/elementwise_sub_cn.rst deleted file mode 100644 index c5886ad2e..000000000 --- a/doc/paddle/api/paddle/tensor/math/elementwise_sub_cn.rst +++ /dev/null @@ -1,122 +0,0 @@ -.. _cn_api_fluid_layers_elementwise_sub: - -elementwise_sub -------------------------------- - -.. 
py:function:: paddle.fluid.layers.elementwise_sub(x, y, axis=-1, act=None, name=None) - -:alias_main: paddle.elementwise_sub -:alias: paddle.elementwise_sub,paddle.tensor.elementwise_sub,paddle.tensor.math.elementwise_sub -:old_api: paddle.fluid.layers.elementwise_sub - - - -该OP是逐元素相减算子,输入 ``x`` 与输入 ``y`` 逐元素相减,并将各个位置的输出元素保存到返回结果中。 - -等式是: - -.. math:: - Out = X - Y - -- :math:`X` :多维Tensor。 -- :math:`Y` :维度必须小于等于X维度的Tensor。 - -对于这个运算算子有2种情况: - 1. :math:`Y` 的 ``shape`` 与 :math:`X` 相同。 - 2. :math:`Y` 的 ``shape`` 是 :math:`X` 的连续子序列。 - -对于情况2: - 1. 用 :math:`Y` 匹配 :math:`X` 的形状(shape),其中 ``axis`` 是 :math:`Y` 在 :math:`X` 上的起始维度的位置。 - 2. 如果 ``axis`` 为-1(默认值),则 :math:`axis= rank(X)-rank(Y)` 。 - 3. 考虑到子序列, :math:`Y` 的大小为1的尾部维度将被忽略,例如shape(Y)=(2,1)=>(2)。 - -例如: - -.. code-block:: text - - shape(X) = (2, 3, 4, 5), shape(Y) = (,) - shape(X) = (2, 3, 4, 5), shape(Y) = (5,) - shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2 - shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 - shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 - shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0 - -参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **y** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` 。数据类型为 ``float32`` 、 ``float64`` 、 ``int32`` 或 ``int64``。 - - **axis** (int32,可选)- ``y`` 的维度对应到 ``x`` 维度上时的索引。默认值为 -1。 - - **act** (str,可选)- 激活函数名称,作用于输出上。默认值为None。详细请参考 :ref:`api_guide_activations` , 常见的激活函数有: ``relu`` ``tanh`` ``sigmoid`` 等。 - - **name** (str,可选)- 输出的名字。默认值为None。该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` 。 - - -返回: 维度与 ``x`` 相同的 ``Tensor`` 或 ``LoDTensor`` ,数据类型与 ``x`` 相同。 - -返回类型: Variable。 - -**代码示例 1** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.array([2, 3, 4]), - "y": np.array([1, 5, 2]) - } - x = fluid.layers.data(name="x", shape=[3], dtype='float32') - y = fluid.layers.data(name="y", shape=[3], dtype='float32') - z = fluid.layers.elementwise_sub(x, y) - # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # [1., -2., 2.] - -**代码示例 2** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[3, 4]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[3,4], dtype='float32') - z = fluid.layers.elementwise_sub(x, y, axis=1) - # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - -**代码示例 3** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - def gen_data(): - return { - "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'), - "y": np.random.randint(1, 5, size=[5]).astype('float32') - } - x = fluid.layers.data(name="x", shape=[2,3,4,5], dtype='float32') - y = fluid.layers.data(name="y", shape=[5], dtype='float32') - z = fluid.layers.elementwise_sub(x, y, axis=3) - # z = x - y - place = fluid.CPUPlace() - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), - fetch_list=[z.name]) - print(z_value) # z.shape=[2,3,4,5] - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/erf_cn.rst b/doc/paddle/api/paddle/tensor/math/erf_cn.rst deleted file mode 100644 index c30dc7175..000000000 --- a/doc/paddle/api/paddle/tensor/math/erf_cn.rst +++ /dev/null @@ -1,75 +0,0 @@ -.. 
_cn_api_fluid_layers_erf: - -erf -------------------------------- - -.. py:function:: paddle.fluid.layers.erf(x) - -:alias_main: paddle.erf -:alias: paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf -:old_api: paddle.fluid.layers.erf - - - -逐元素计算 Erf 激活函数。更多细节请参考 `Error function `_ 。 - - -.. math:: - out = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta - -参数: - - **x** (Variable) - Erf Op 的输入,多维 Tensor 或 LoDTensor,数据类型为 float16, float32 或 float64。 - -返回: - - 多维 Tensor 或 LoDTensor, 数据类型为 float16, float32 或 float64, 和输入 x 的数据类型相同,形状和输入 x 相同。 - -返回类型: - - Variable - -**代码示例**: - -.. code-block:: python - - # declarative mode - import numpy as np - from paddle import fluid - - x = fluid.data(name="x", shape=(-1, 3), dtype="float32") - y = fluid.layers.erf(x) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - start = fluid.default_startup_program() - main = fluid.default_main_program() - - data = np.random.randn(2, 3).astype("float32") - exe.run(start) - - y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) - - data - # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], - # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) - y_np - # array([[ 0.48863927, -0.8964121 , 0.9237998 ], - # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) - -.. 
code-block:: python - - # imperative mode - import numpy as np - from paddle import fluid - import paddle.fluid.dygraph as dg - - data = np.random.randn(2, 3).astype("float32") - place = fluid.CPUPlace() - with dg.guard(place) as g: - x = dg.to_variable(data) - y = fluid.layers.erf(x) - y_np = y.numpy() - data - # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], - # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) - y_np - # array([[ 0.48863927, -0.8964121 , 0.9237998 ], - # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) diff --git a/doc/paddle/api/paddle/tensor/math/exp_cn.rst b/doc/paddle/api/paddle/tensor/math/exp_cn.rst deleted file mode 100644 index 33f053e94..000000000 --- a/doc/paddle/api/paddle/tensor/math/exp_cn.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _cn_api_fluid_layers_exp: - -exp -------------------------------- - -.. py:function:: paddle.fluid.layers.exp(x, name=None) - -:alias_main: paddle.exp -:alias: paddle.exp,paddle.tensor.exp,paddle.tensor.math.exp -:old_api: paddle.fluid.layers.exp - - - -对输入,逐元素进行以自然数e为底指数运算。 - -.. math:: - out = e^x - -参数: - - **x** (Variable) - 该OP的输入为多维Tensor。数据类型为float32,float64。 - - **name** (str, 可选) - 具体用法请参见 :ref:`api_guide_Name`,一般无需设置,默认值为None。 - -返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 - -返回类型: Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - input_exp = np.array([[-1.5,6],[1,15.6]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_exp) - y = fluid.layers.exp(x) - print(y.numpy()) - # [[2.23130160e-01 4.03428793e+02] - # [2.71828183e+00 5.95653801e+06]] diff --git a/doc/paddle/api/paddle/tensor/math/floor_cn.rst b/doc/paddle/api/paddle/tensor/math/floor_cn.rst deleted file mode 100644 index e1dc4edb7..000000000 --- a/doc/paddle/api/paddle/tensor/math/floor_cn.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. _cn_api_fluid_layers_floor: - -floor -------------------------------- - -.. 
py:function:: paddle.fluid.layers.floor(x, name=None) - -:alias_main: paddle.floor -:alias: paddle.floor,paddle.tensor.floor,paddle.tensor.math.floor -:old_api: paddle.fluid.layers.floor - - - -向下取整函数。 - -.. math:: - out = \left \lfloor x \right \rfloor - -参数: - - **x** - 该OP的输入为多维Tensor。数据类型必须为float32或float64。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回:输出为Tensor,与 ``x`` 维度相同、数据类型相同。 - -返回类型:Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - data1 = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float32') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]] - data2 = fluid.layers.fill_constant(shape=[2, 3], value=-2.5, dtype='float64') # [[-2.5, -2.5, -2.5], [-2.5, -2.5, -2.5]] - result1 = fluid.layers.floor(data1) # [[2., 2.], [2., 2.], [2., 2.]] - result2 = fluid.layers.floor(data2) # [[-3., -3., -3.], [-3., -3., -3.]] diff --git a/doc/paddle/api/paddle/tensor/math/increment_cn.rst b/doc/paddle/api/paddle/tensor/math/increment_cn.rst deleted file mode 100644 index 8edb5db4a..000000000 --- a/doc/paddle/api/paddle/tensor/math/increment_cn.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _cn_api_fluid_layers_increment: - -increment -------------------------------- - -.. py:function:: paddle.fluid.layers.increment(x, value=1.0, in_place=True) - -:alias_main: paddle.increment -:alias: paddle.increment,paddle.tensor.increment,paddle.tensor.math.increment -:old_api: paddle.fluid.layers.increment - - - -使输入Tensor ``x`` 的数据累加 ``value`` , 该OP通常用于循环次数的计数。 - -参数: - - **x** (Variable) – 元素个数为1的Tensor,数据类型必须为float32,float64,int32,int64。 - - **value** (float,可选) – 需要增加的值,默认为1.0。 - - **in_place** (bool,可选) – 输出Tensor是否和输入Tensor ``x`` 复用同一块内存,默认为True。 - -返回:累加计算后的Tensor,形状、数据类型和 ``x`` 一致。 - -返回类型:Variable - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.] - fluid.layers.increment(counter) # [1.] 
diff --git a/doc/paddle/api/paddle/tensor/math/log_cn.rst b/doc/paddle/api/paddle/tensor/math/log_cn.rst deleted file mode 100644 index dc8f40a80..000000000 --- a/doc/paddle/api/paddle/tensor/math/log_cn.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _cn_api_fluid_layers_log: - -log -------------------------------- - -.. py:function:: paddle.fluid.layers.log(x, name=None) - -:alias_main: paddle.log -:alias: paddle.log,paddle.tensor.log,paddle.tensor.math.log -:old_api: paddle.fluid.layers.log - - - - -Log激活函数(计算自然对数) - -.. math:: - \\Out=ln(x)\\ - - -参数: - - **x** (Variable) – 该OP的输入为LodTensor/Tensor。数据类型为float32,float64。 - - **name** (str,可选) – 该参数供开发人员打印调试信息时使用,具体用法请参见 :ref:`api_guide_Name` ,默认值为None。 - -返回:Log算子自然对数输出 - -返回类型: Variable - 该OP的输出为LodTensor/Tensor,数据类型为输入一致。 - - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - # Graph Organizing - x = fluid.layers.data(name="x", shape=[1], dtype="float32") - res = fluid.layers.log(x) - - # Create an executor using CPU as an example - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - # Execute - x_i = np.array([[1], [2]]).astype(np.float32) - res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res]) - print(res_val) # [[0.], [0.6931472]] - diff --git a/doc/paddle/api/paddle/tensor/math/multiplex_cn.rst b/doc/paddle/api/paddle/tensor/math/multiplex_cn.rst deleted file mode 100644 index 1cc5bf39c..000000000 --- a/doc/paddle/api/paddle/tensor/math/multiplex_cn.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. _cn_api_fluid_layers_multiplex: - -multiplex -------------------------------- - -.. 
py:function:: paddle.fluid.layers.multiplex(inputs, index) - -:alias_main: paddle.multiplex -:alias: paddle.multiplex,paddle.tensor.multiplex,paddle.tensor.math.multiplex -:old_api: paddle.fluid.layers.multiplex - - - -根据给定的index参数,该OP从每个输入Tensor中选择特定行构造输出Tensor。 - -设该OP输入包含 :math:`m` 个Tensor,其中 :math:`I_{i}` 代表第i个输入Tensor,:math:`i` 处于区间 :math:`[0,m)`。 - -设该OP输出为 :math:`O` ,其中 :math:`O[i]` 为输出的第i行,则输出满足: :math:`O[i] = I_{index[i]}[i]` - -示例: - -.. code-block:: text - - # 输入为4个shape为[4,4]的Tensor - inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]], - [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]], - [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]], - [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]] - - # index为shape为[4,1]的Tensor - index = [[3],[0],[1],[2]] - - # 输出shape为[4,4] - out = [[3,0,3,4] // out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4] - [0,1,3,4] // out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4] - [1,2,4,2] // out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2] - [2,3,3,4]] // out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4] - -参数: - - **inputs** (list) - 为输入Tensor列表,列表元素为数据类型为float32,float64,int32,int64的多维Tensor。所有输入Tensor的shape应相同,秩必须至少为2。 - - **index** (Variable)- 用来选择输入Tensor中的某些行构建输出Tensor的索引,为数据类型为int32或int64、shape为[M, 1]的2-D Tensor,其中M为输入Tensor个数。 - -返回:进行Multiplex运算后的输出Tensor。 - -返回类型:Variable(Tensor)。 - -**代码示例** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - - x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32') - x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32') - index = fluid.layers.data(name='index', shape=[1], dtype='int32') - out = fluid.layers.multiplex(inputs=[x1, x2], index=index) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img1 = np.array([[1, 2], [3, 4]]).astype(np.float32) - img2 = np.array([[5, 6], [7, 8]]).astype(np.float32) - index = np.array([[1], [0]]).astype(np.int32) - - res = exe.run(fluid.default_main_program(), feed={'x1':img1, 'x2':img2, 'index':index}, fetch_list=[out]) - print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)] - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/pow_cn.rst b/doc/paddle/api/paddle/tensor/math/pow_cn.rst index 01987cc79..40eaf5421 100644 --- a/doc/paddle/api/paddle/tensor/math/pow_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/pow_cn.rst @@ -1,28 +1,27 @@ -.. _cn_api_tensor_argmax: +.. _cn_api_fluid_layers_pow: pow ------------------------------- -.. py:function:: paddle.pow(input, exponent, out=None, name=None): +.. py:function:: paddle.pow(x, exponent, name=None) -:alias_main: paddle.pow -:alias: paddle.pow,paddle.tensor.pow,paddle.tensor.math.pow -:update_api: paddle.fluid.layers.pow 该OP是指数激活算子: .. 
math:: - out = x^{exponent} + + out = x^{exponent} + +**注意:如果需要对输入进行 elementwise_pow 操作,请查使用** :ref:`cn_api_fluid_layers_elementwise_pow` 。 参数: - - **x** (Variable)- 多维 ``Tensor`` 或 ``LoDTensor`` ,数据类型为 ``float32`` 或 ``float64`` 。 - - **exponent** (float32|Variable,可选)- ``float32`` 或形状为[1]的 ``Tensor`` 或 ``LoDTensor``,数据类型为 ``float32``。Pow OP的指数因子。默认值:1.0。 - - **out** (Variable, 可选) - 默认值None,如果out不为空,则该运算结果存储在out变量中。 - - **name** (str,可选) - 默认值None,输出的名称。该参数供开发人员打印调试信息时使用,具体用法参见 :ref:`api_guide_name`。当out和name同时不为空时,结果输出变量名与out保持一致。 + - **x** (Variable)- 多维 ``Variable``,数据类型为 ``float32`` 或 ``float64`` 。 + - **exponent** (float32|Variable)- ``float32`` 或形状为[1]的 ``Variable``,数据类型为 ``float32``。 + - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置。默认值: ``None``。 -返回:维度与输入 `x` 相同的 ``Tensor`` 或 ``LoDTensor``,数据类型与 ``x`` 相同。 +返回:维度与输入 `x` 相同的 ``Variable``,数据类型与 ``x`` 相同。 返回类型:Variable。 @@ -31,17 +30,26 @@ pow .. code-block:: python - import paddle - import paddle.fluid as fluid - x = fluid.data(name="x", shape=[32,32], dtype="float32") - - # 示例1: 参数exponent是个浮点数 - res = fluid.data(name="output", shape=[32,32], dtype="float32") - y_1 = paddle.pow(x, 2.0, out=res) - # y_1 is x^{2.0} - - # 示例2: 参数exponent是个变量 - exponent_tensor = fluid.layers.fill_constant([1], "float32", 3.0) - res = fluid.data(name="output", shape=[32,32], dtype="float32") - y_2 = paddle.pow(x, exponent_tensor, out=res) - # y_2 is x^{3.0} \ No newline at end of file + import paddle + import numpy as np + x = fluid.data(name="x", shape=[32,32], dtype="float32") + paddle.enable_imperative() + + # example 1: exponent is a float + x_data = np.array([1, 2, 3]) + exponent = 2 + x = paddle.imperative.to_variable(x_data) + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + # example 2: exponent is a Variable + exponent = paddle.fill_constant(shape=[1], value=2, dtype='float32') + res = paddle.pow(x, exponent) + print(res.numpy()) # [1 4 9] + + + + + + + diff --git 
a/doc/paddle/api/paddle/tensor/math/reciprocal_cn.rst b/doc/paddle/api/paddle/tensor/math/reciprocal_cn.rst deleted file mode 100644 index a76a495a5..000000000 --- a/doc/paddle/api/paddle/tensor/math/reciprocal_cn.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. _cn_api_fluid_layers_reciprocal: - -reciprocal -------------------------------- - -.. py:function:: paddle.fluid.layers.reciprocal(x, name=None) - -:alias_main: paddle.reciprocal -:alias: paddle.reciprocal,paddle.tensor.reciprocal,paddle.tensor.math.reciprocal -:old_api: paddle.fluid.layers.reciprocal - - - -reciprocal 对输入Tensor取倒数 - - -.. math:: - out = \frac{1}{x} - -参数: - - - **x** - 输入的多维Tensor,支持的数据类型为float32,float64。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - - -返回: 对输入取倒数得到的Tensor,输出Tensor数据类型和维度与输入相同。 - -**代码示例**: - -.. code-block:: python - - import paddle - import numpy as np - - paddle.enable_imperative() - x_data = np.array([1, 2, 3, 4]).astype(np.float32) - x = paddle.imperative.to_variable(x_data) - res = paddle.%s(x) - print(res.numpy()) - - - - diff --git a/doc/paddle/api/paddle/tensor/math/reduce_max_cn.rst b/doc/paddle/api/paddle/tensor/math/reduce_max_cn.rst deleted file mode 100644 index 37cc3db57..000000000 --- a/doc/paddle/api/paddle/tensor/math/reduce_max_cn.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. _cn_api_fluid_layers_reduce_max: - -reduce_max -------------------------------- - -.. 
py:function:: paddle.fluid.layers.reduce_max(input, dim=None, keep_dim=False, name=None) - -:alias_main: paddle.reduce_max -:alias: paddle.reduce_max,paddle.tensor.reduce_max,paddle.tensor.math.reduce_max -:old_api: paddle.fluid.layers.reduce_max - - - -该OP是对指定维度上的Tensor元素求最大值运算,并输出相应的计算结果。 - -参数: - - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求最大值运算的维度。如果为None,则计算所有元素的最大值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 在指定dim上进行求最大值运算的Tensor,数据类型和输入数据类型一致。 - -返回类型: 变量(Variable) - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - # x是一个Tensor,元素如下: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - fluid.layers.reduce_max(x) # [0.9] - fluid.layers.reduce_max(x, dim=0) # [0.2, 0.3, 0.6, 0.9] - fluid.layers.reduce_max(x, dim=-1) # [0.9, 0.7] - fluid.layers.reduce_max(x, dim=1, keep_dim=True) # [[0.9], [0.7]] - - # y是一个shape为[2, 2, 2]的Tensor,元素如下: - # [[[1.0, 2.0], [3.0, 4.0]], - # [[5.0, 6.0], [7.0, 8.0]]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0] - fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0] - - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/reduce_min_cn.rst b/doc/paddle/api/paddle/tensor/math/reduce_min_cn.rst deleted file mode 100644 index 2517c935e..000000000 --- a/doc/paddle/api/paddle/tensor/math/reduce_min_cn.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. _cn_api_fluid_layers_reduce_min: - -reduce_min -------------------------------- - -.. 
py:function:: paddle.fluid.layers.reduce_min(input, dim=None, keep_dim=False, name=None) - -:alias_main: paddle.reduce_min -:alias: paddle.reduce_min,paddle.tensor.reduce_min,paddle.tensor.math.reduce_min -:old_api: paddle.fluid.layers.reduce_min - - - -该OP是对指定维度上的Tensor元素求最小值运算,并输出相应的计算结果。 - -参数: - - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求最小值运算的维度。如果为None,则计算所有元素的最小值并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 在指定dim上进行求最小值运算的Tensor,数据类型和输入数据类型一致。 - -返回类型: 变量(Variable) - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - # x是一个Tensor,元素如下: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - fluid.layers.reduce_min(x) # [0.1] - fluid.layers.reduce_min(x, dim=0) # [0.1, 0.2, 0.5, 0.7] - fluid.layers.reduce_min(x, dim=-1) # [0.2, 0.1] - fluid.layers.reduce_min(x, dim=1, keep_dim=True) # [[0.2], [0.1]] - - # y是一个shape为[2, 2, 2]的Tensor元素如下: - # [[[1.0, 2.0], [3.0, 4.0]], - # [[5.0, 6.0], [7.0, 8.0]]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0] - fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0] - - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/reduce_prod_cn.rst b/doc/paddle/api/paddle/tensor/math/reduce_prod_cn.rst deleted file mode 100644 index a1b65ee1d..000000000 --- a/doc/paddle/api/paddle/tensor/math/reduce_prod_cn.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _cn_api_fluid_layers_reduce_prod: - -reduce_prod -------------------------------- - -.. 
py:function:: paddle.fluid.layers.reduce_prod(input, dim=None, keep_dim=False, name=None) - -:alias_main: paddle.reduce_prod -:alias: paddle.reduce_prod,paddle.tensor.reduce_prod,paddle.tensor.math.reduce_prod -:old_api: paddle.fluid.layers.reduce_prod - - - -该OP是对指定维度上的Tensor元素进行求乘积运算,并输出相应的计算结果。 - -参数: - - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求乘积运算的维度。如果为None,则计算所有元素的乘积并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 在指定dim上进行求乘积运算的Tensor,数据类型和输入数据类型一致。 - -返回类型: 变量(Variable) - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - # x是一个Tensor,元素如下: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - fluid.layers.reduce_prod(x) # [0.0002268] - fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63] - fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084] - fluid.layers.reduce_prod(x, dim=1, - keep_dim=True) # [[0.027], [0.0084]] - - # y 是一个shape为[2, 2, 2]的Tensor元素如下: - # [[[1.0, 2.0], [3.0, 4.0]], - # [[5.0, 6.0], [7.0, 8.0]]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0] - fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0] - - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/reduce_sum_cn.rst b/doc/paddle/api/paddle/tensor/math/reduce_sum_cn.rst deleted file mode 100644 index da06baf68..000000000 --- a/doc/paddle/api/paddle/tensor/math/reduce_sum_cn.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. _cn_api_fluid_layers_reduce_sum: - -reduce_sum -------------------------------- - -.. 
py:function:: paddle.fluid.layers.reduce_sum(input, dim=None, keep_dim=False, name=None) - -:alias_main: paddle.reduce_sum -:alias: paddle.reduce_sum,paddle.tensor.reduce_sum,paddle.tensor.math.reduce_sum -:old_api: paddle.fluid.layers.reduce_sum - - - -该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 - -参数: - - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求和运算的维度。如果为None,则计算所有元素的和并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 在指定dim上进行求和运算的Tensor,数据类型和输入数据类型一致。 - -返回类型: 变量(Variable) - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - # x是一个Tensor,元素如下: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - fluid.layers.reduce_sum(x) # [3.5] - fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] - fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6] - fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] - - # y 是一个shape为[2, 2, 2]的Tensor元素如下: - # [[[1, 2], [3, 4]], - # [[5, 6], [7, 8]]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26] - fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20] - - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/round_cn.rst b/doc/paddle/api/paddle/tensor/math/round_cn.rst deleted file mode 100644 index 7de4214b7..000000000 --- a/doc/paddle/api/paddle/tensor/math/round_cn.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. _cn_api_fluid_layers_round: - -round -------------------------------- - -.. 
py:function:: paddle.fluid.layers.round(x, name=None) - -:alias_main: paddle.round -:alias: paddle.round,paddle.tensor.round,paddle.tensor.math.round -:old_api: paddle.fluid.layers.round - - - - -该OP将输入中的数值四舍五入到最接近的整数数值。 - -.. code-block:: python - - 输入: - x.shape = [4] - x.data = [1.2, -0.9, 3.4, 0.9] - - 输出: - out.shape = [4] - Out.data = [1., -1., 3., 1.] - -参数: - - - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 - -**代码示例**: - -.. code-block:: python - - import numpy as np - import paddle.fluid as fluid - - inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = fluid.layers.round(inputs) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([1.2, -0.9, 3.4, 0.9]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) - print(res) - # [array([ 1., -1., 3., 1.], dtype=float32)] - - - diff --git a/doc/paddle/api/paddle/tensor/math/rsqrt_cn.rst b/doc/paddle/api/paddle/tensor/math/rsqrt_cn.rst deleted file mode 100644 index 0d4a83041..000000000 --- a/doc/paddle/api/paddle/tensor/math/rsqrt_cn.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _cn_api_fluid_layers_rsqrt: - -rsqrt -------------------------------- - -.. py:function:: paddle.fluid.layers.rsqrt(x, name=None) - -:alias_main: paddle.rsqrt -:alias: paddle.rsqrt,paddle.tensor.rsqrt,paddle.tensor.math.rsqrt -:old_api: paddle.fluid.layers.rsqrt - - - -该OP为rsqrt激活函数。 - -注:输入x应确保为非 **0** 值,否则程序会抛异常退出。 - -其运算公式如下: - -.. math:: - out = \frac{1}{\sqrt{x}} - - -参数: - - **x** (Variable) – 输入是多维Tensor或LoDTensor,数据类型可以是float32和float64。 - - **name** (str,可选)— 这一层的名称(可选)。如果设置为None,则将自动命名这一层。默认值为None。 - -返回:对输入x进行rsqrt激活函数计算后的Tensor或LoDTensor,数据shape和输入x的shape一致。 - -返回类型:Variable,数据类型和输入数据类型一致。 - -**代码示例**: - -.. 
code-block:: python - - import paddle.fluid as fluid - data = fluid.data(name="input", shape=[32, 784]) - result = fluid.layers.rsqrt(data) - diff --git a/doc/paddle/api/paddle/tensor/math/scale_cn.rst b/doc/paddle/api/paddle/tensor/math/scale_cn.rst deleted file mode 100644 index 6623f9e45..000000000 --- a/doc/paddle/api/paddle/tensor/math/scale_cn.rst +++ /dev/null @@ -1,77 +0,0 @@ -.. _cn_api_fluid_layers_scale: - -scale -------------------------------- - -.. py:function:: paddle.fluid.layers.scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None) - -:alias_main: paddle.scale -:alias: paddle.scale,paddle.tensor.scale,paddle.tensor.math.scale -:old_api: paddle.fluid.layers.scale - - - -缩放算子。 - -对输入Tensor进行缩放和偏置,其公式如下: - -``bias_after_scale`` 为True: - -.. math:: - Out=scale*X+bias - -``bias_after_scale`` 为False: - -.. math:: - Out=scale*(X+bias) - -参数: - - **x** (Variable) - 要进行缩放的多维Tensor,数据类型可以为float32,float64,int8,int16,int32,int64,uint8。 - - **scale** (float|Variable) - 缩放的比例,是一个float类型或者一个shape为[1],数据类型为float32的Variable类型。 - - **bias** (float) - 缩放的偏置。 - - **bias_after_scale** (bool) - 判断在缩放之前或之后添加偏置。为True时,先缩放再偏置;为False时,先偏置再缩放。该参数在某些情况下,对数值稳定性很有用。 - - **act** (str,可选) - 应用于输出的激活函数,如tanh、softmax、sigmoid、relu等。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 缩放后的输出Tensor。 - -返回类型: Variable(Tensor|LoDTensor)。 - -**代码示例:** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32') - output = fluid.layers.scale(inputs, scale = 2.0, bias = 1.0) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) - print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)] - -.. 
code-block:: python - - # scale with parameter scale as Variable - import paddle.fluid as fluid - import numpy as np - - inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32') - scale = fluid.layers.data(name="scale", shape=[1], dtype='float32', - append_batch_size=False) - output = fluid.layers.scale(inputs, scale = scale, bias = 1.0) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - scale_np = np.array([2.]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img, 'scale':scale_np}, fetch_list=[output]) - print(res) # [array([[ 3., 5., 7.], [ 9., 11., 13.]], dtype=float32)] - diff --git a/doc/paddle/api/paddle/tensor/math/sign_cn.rst b/doc/paddle/api/paddle/tensor/math/sign_cn.rst deleted file mode 100644 index 5559e7a7b..000000000 --- a/doc/paddle/api/paddle/tensor/math/sign_cn.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _cn_api_fluid_layers_sign: - -sign -------------------------------- - -.. py:function:: paddle.fluid.layers.sign(x) - -:alias_main: paddle.sign -:alias: paddle.sign,paddle.tensor.sign,paddle.tensor.math.sign -:old_api: paddle.fluid.layers.sign - - - -此OP对输入x中每个元素进行正负判断,并且输出正负判断值:1代表正,-1代表负,0代表零。 - -参数: - - **x** (Variable|numpy.ndarray) – 进行正负值判断的多维Tensor或者是多维的numpy数组,数据类型为 float32,float64。 - -返回:输出正负号Tensor,数据的shape大小和输入x的数据shape一致。 - -返回类型:Variable,数据类型和输入数据类型一致。 - -**代码示例** - -.. code-block:: python - - import paddle.fluid as fluid - import numpy as np - - data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32')) - # data=[1.0, 0.0, -1.0] - diff --git a/doc/paddle/api/paddle/tensor/math/square_cn.rst b/doc/paddle/api/paddle/tensor/math/square_cn.rst deleted file mode 100644 index 8c2a73306..000000000 --- a/doc/paddle/api/paddle/tensor/math/square_cn.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. _cn_api_fluid_layers_square: - -square -------------------------------- - -.. 
py:function:: paddle.fluid.layers.square(x,name=None) - -:alias_main: paddle.square -:alias: paddle.square,paddle.tensor.square,paddle.tensor.math.square -:old_api: paddle.fluid.layers.square - - - -该OP执行逐元素取平方运算。 - -.. math:: - out = x^2 - -参数: - - **x** (Variable) - 任意维度的Tensor,支持的数据类型: float32,float64。 - - **name** (str,可选) - 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回:返回取平方后的Tensor,维度和数据类型同输入一致。 - -返回类型:Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - data = fluid.layers.data(name="input", shape=[32, 784], dtype='float32') - result = fluid.layers.square(data) #result.shape=[32, 784], type=float32 - - - - - - - - - - - diff --git a/doc/paddle/api/paddle/tensor/math/stanh_cn.rst b/doc/paddle/api/paddle/tensor/math/stanh_cn.rst deleted file mode 100644 index d59206009..000000000 --- a/doc/paddle/api/paddle/tensor/math/stanh_cn.rst +++ /dev/null @@ -1,51 +0,0 @@ -.. _cn_api_fluid_layers_stanh: - -stanh -------------------------------- - -.. py:function:: paddle.fluid.layers.stanh(x, scale_a=0.67, scale_b=1.7159, name=None) - -:alias_main: paddle.stanh -:alias: paddle.stanh,paddle.tensor.stanh,paddle.tensor.math.stanh -:old_api: paddle.fluid.layers.stanh - - - -STanh 激活算子(STanh Activation Operator.) - -.. math:: - \\out=b*\frac{e^{a*x}-e^{-a*x}}{e^{a*x}+e^{-a*x}}\\ - -参数: - - **x** (Tensor|LoDTensor) - 数据类型为float32,float64。STanh operator的输入 - - **scale_a** (float) - 输入的a的缩放参数 - - **scale_b** (float) - b的缩放参数 - - **name** (str|None) - 这个层的名称(可选)。如果设置为None,该层将被自动命名 - -返回: 与输入shape相同的张量 - -返回类型: Variable(Tensor),数据类型为float32的Tensor。 - -**代码示例:** - -.. 
code-block:: python - - import paddle.fluid as fluid - import numpy as np - data = fluid.layers.data(name="input", shape=[-1, 3]) - result = fluid.layers.stanh(data,scale_a=0.67, scale_b=1.72) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - x = np.random.random(size=(3, 3)).astype('float32') - output= exe.run(feed={"input": x}, - fetch_list=[result]) - print(output) - """ - output: - [array([[0.626466 , 0.89842904, 0.7501062 ], - [0.25147712, 0.7484996 , 0.22902708], - [0.62705994, 0.23110689, 0.56902856]], dtype=float32)] - """ - - diff --git a/doc/paddle/api/paddle/tensor/math/sum_cn.rst b/doc/paddle/api/paddle/tensor/math/sum_cn.rst old mode 100644 new mode 100755 index 89825e0b4..0dc93e01c --- a/doc/paddle/api/paddle/tensor/math/sum_cn.rst +++ b/doc/paddle/api/paddle/tensor/math/sum_cn.rst @@ -1,49 +1,81 @@ -.. _cn_api_tensor_sum: +.. _cn_api_fluid_layers_sum: sum ------------------------------- -.. py:function:: paddle.sum(input, dim=None, dtype=None, keep_dim=False, name=None) +.. 
py:function:: paddle.fluid.layers.sum(x) -:alias_main: paddle.sum -:alias: paddle.sum,paddle.tensor.sum,paddle.tensor.math.sum -:update_api: paddle.fluid.layers.reduce_sum -该OP是对指定维度上的Tensor元素进行求和运算,并输出相应的计算结果。 +该OP用于对输入的一至多个Tensor或LoDTensor求和。如果输入的是LoDTensor,输出仅与第一个输入共享LoD信息(序列信息)。 + +例1: +:: + 输入: + input.shape = [2, 3] + input = [[1, 2, 3], + [4, 5, 6]] + + 输出: + output.shape = [2, 3] + output = [[1, 2, 3], + [4, 5, 6]] + +例2: +:: + 输入: + 第一个输入: + input1.shape = [2, 3] + input1 = [[1, 2, 3], + [4, 5, 6]] + + 第二个输入: + input2.shape = [2, 3] + input2 = [[7, 8, 9], + [10, 11, 12]] + + 输出: + output.shape = [2, 3] + output = [[8, 10, 12], + [14, 16, 18]] 参数: - - **input** (Variable)- 输入变量为多维Tensor或LoDTensor,支持数据类型为float32,float64,int32,int64。 - - **dim** (list | int ,可选)- 求和运算的维度。如果为None,则计算所有元素的和并返回包含单个元素的Tensor变量,否则必须在 :math:`[−rank(input),rank(input)]` 范围内。如果 :math:`dim [i] <0` ,则维度将变为 :math:`rank+dim[i]` ,默认值为None。 - - **dtype** (str , 可选)- 输出变量的数据类型。若参数为空,则输出变量的数据类型和输入变量相同,默认值为None。 - - **keep_dim** (bool)- 是否在输出Tensor中保留减小的维度。如 keep_dim 为true,否则结果张量的维度将比输入张量小,默认值为False。 - - **name** (str , 可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回: 在指定dim上进行求和运算的Tensor,数据类型和输入数据类型一致。 - -返回类型: 变量(Variable) - -**代码示例** - -.. 
code-block:: python - - import paddle - import paddle.fluid as fluid - # x是一个Tensor,元素如下: - # [[0.2, 0.3, 0.5, 0.9] - # [0.1, 0.2, 0.6, 0.7]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - x = fluid.data(name='x', shape=[2, 4], dtype='float32') - out1 = paddle.sum(x) # [3.5] - out2 = paddle.sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] - out3 = paddle.sum(x, dim=-1) # [1.9, 1.6] - out4 = paddle.sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] - - # y 是一个shape为[2, 2, 2]的Tensor元素如下: - # [[[1, 2], [3, 4]], - # [[5, 6], [7, 8]]] - # 接下来的示例中,我们在每处函数调用后面都标注出了它的结果张量。 - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - out5 = paddle.sum(y, dim=[1, 2]) # [10, 26] - out6 = paddle.sum(y, dim=[0, 1]) # [16, 20] + **x** (Variable|list(Variable)) - 输入的一至多个Variable。如果输入了多个Variable,则不同Variable间的shape和数据类型应保持一致。Variable为多维Tensor或LoDTensor,数据类型支持:float32,float64,int32,int64 + +返回:对输入 ``x`` 中的Variable求和后的结果,shape和数据类型与 ``x`` 一致 + +返回类型:Variable + + +**代码示例:** + +.. code-block:: python + + import paddle.fluid as fluid + input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5) + input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3) + sum = fluid.layers.sum([input0, input1]) + + #用户可以通过executor打印出求和的结果 + out = fluid.layers.Print(sum, message="the sum of input0 and input1: ") + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_main_program()) + + #打印出的数据为: + 1570701754 the sum of input0 and input1: The place is:CPUPlace + Tensor[sum_0.tmp_0] + shape: [2,3,] + dtype: l + data: 8,8,8,8,8,8, + + #输出了shape为[2,3]的Tensor,与输入的shape一致 + #dtype为对应C++数据类型,在不同环境下可能显示值不同,但本质相同 + #例如:如果Tensor中数据类型是int64,则对应的C++数据类型为int64_t,所以dtype值为typeid(int64_t).name(), + # 其在MacOS下为'x',linux下为'l',Windows下为'__int64',都表示64位整型变量 + + + + + + diff --git a/doc/paddle/api/paddle/tensor/math/sums_cn.rst b/doc/paddle/api/paddle/tensor/math/sums_cn.rst deleted file mode 100644 index 7bec1960d..000000000 --- a/doc/paddle/api/paddle/tensor/math/sums_cn.rst +++ /dev/null @@ -1,60 +0,0 @@ 
-.. _cn_api_fluid_layers_sums: - -sums -------------------------------- - -.. py:function:: paddle.fluid.layers.sums(input,out=None) - -:alias_main: paddle.sums -:alias: paddle.sums,paddle.tensor.sums,paddle.tensor.math.sums -:old_api: paddle.fluid.layers.sums - - - -该OP计算多个输入Tensor逐个元素相加的和。 - -- 示例:3个Tensor求和 - -.. code-block:: python - - 输入: - x0.shape = [2, 3] - x0.data = [[1., 2., 3.], - [4., 5., 6.]] - x1.shape = [2, 3] - x1.data = [[10., 20., 30.], - [40., 50., 60.]] - x2.shape = [2, 3] - x2.data = [[100., 200., 300.], - [400., 500., 600.]] - - 输出: - out.shape = [2, 3] - out.data = [[111., 222., 333.], - [444., 555., 666.]] - - -参数: - - **input** (list) - 多个维度相同的Tensor组成的元组。支持的数据类型:float32,float64,int32,int64。 - - **out** (Variable,可选) - 指定求和的结果Tensor,可以是程序中已经创建的任何Variable。默认值为None,此时将创建新的Variable来保存输出结果。 - -返回:输入的和,数据类型和维度与输入Tensor相同。若 ``out`` 为 ``None`` ,返回值是一个新的Variable;否则,返回值就是 ``out`` 。 - -返回类型:Variable - -**代码示例**: - -.. code-block:: python - - import paddle.fluid as fluid - - x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1) - x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2) - x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3) - x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0) - - # 多个Tensor求和,结果保存在一个新建的Variable sum0,即sum0=x0+x1+x2,值为[[6, ..., 6], ..., [6, ..., 6]] - sum0 = fluid.layers.sums(input=[x0, x1, x2]) - - # 多个Tensor求和,sum1和x3是同一个Variable,相当于x3=x0+x1+x2,值为[[6, ..., 6], ..., [6, ..., 6]] - sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3) diff --git a/doc/paddle/api/paddle/tensor/math/tanh_cn.rst b/doc/paddle/api/paddle/tensor/math/tanh_cn.rst deleted file mode 100644 index 84e295cb7..000000000 --- a/doc/paddle/api/paddle/tensor/math/tanh_cn.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _cn_api_tensor_tanh: - -tanh -------------------------------- - -.. 
py:function:: paddle.tanh(x, name=None, out=None) - -:alias_main: paddle.tanh -:alias: paddle.tanh,paddle.tensor.tanh,paddle.tensor.math.tanh -:update_api: paddle.fluid.layers.tanh - - - -tanh 激活函数 - -.. math:: - out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} - - -参数: - - - **x** (Variable) - 支持任意维度的Tensor。数据类型为float32,float64或float16。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 - -返回:返回类型为Variable(Tensor|LoDTensor), 数据类型同输入一致。 - -**代码示例**: - -.. code-block:: python - - import numpy as np - import paddle - import paddle.fluid as fluid - - inputs = fluid.layers.data(name="x", shape = [3], dtype='float32') - output = paddle.tanh(inputs) - - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - - img = np.array([0, 0.5, 0.3]).astype(np.float32) - - res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output]) - print(res) - # [array([0., 0.46211717, 0.2913126], dtype=float32)] diff --git a/doc/paddle/api/paddle/tensor/search/argmax_cn.rst b/doc/paddle/api/paddle/tensor/search/argmax_cn.rst index 2b3a7857b..d165ce8d6 100644 --- a/doc/paddle/api/paddle/tensor/search/argmax_cn.rst +++ b/doc/paddle/api/paddle/tensor/search/argmax_cn.rst @@ -1,26 +1,20 @@ -.. _cn_api_tensor_argmax: +.. _cn_api_fluid_layers_argmax: argmax ------------------------------- -.. py:function:: paddle.argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None) +.. 
py:function:: paddle.fluid.layers.argmax(x, axis=0) -:alias_main: paddle.argmax -:alias: paddle.argmax,paddle.tensor.argmax,paddle.tensor.search.argmax -:update_api: paddle.fluid.layers.argmax +**argmax** -该OP沿 ``axis`` 计算输入 ``input`` 的最大元素的索引。 +该OP沿 ``axis`` 计算输入 ``x`` 的最大元素的索引。 参数: - - **input** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int8、int16、int32、int64。 - - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``input`` 的Rank, ``axis`` -R与绝对值相同的R等价。默认值为0。 - - **dtype** (np.dtype|core.VarDesc.VarType|str)- 输出Tensor的数据类型,可选值为int32,int64,默认值为None,将返回int64类型的结果。 - - **out** (Variable, 可选) – 指定存储运算结果的Tensor。如果设置为None或者不设置,将创建新的Tensor存储运算结果,默认值为None。 - - **keepdims** (bool,可选)- 是否保留进行max index操作的维度,默认值为False。 - - **name** (str,可选) – 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 + - **x** (Variable) - 输入的多维 ``Tensor`` ,支持的数据类型:float32、float64、int8、int16、int32、int64。 + - **axis** (int,可选) - 指定对输入Tensor进行运算的轴, ``axis`` 的有效范围是[-R, R),R是输入 ``x`` 的Rank, ``axis`` 为负时与 ``axis`` +R 等价。默认值为0。 返回: ``Tensor`` ,数据类型int64 @@ -30,40 +24,31 @@ argmax .. 
code-block:: python - import paddle - import paddle.fluid as fluid - import numpy as np - - in1 = np.array([[[5,8,9,5], - [0,0,1,7], - [6,9,2,4]], - [[5,2,4,2], - [4,7,7,9], - [1,7,0,6]]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(in1) - out1 = paddle.argmax(input=x, axis=-1) - out2 = paddle.argmax(input=x, axis=0) - out3 = paddle.argmax(input=x, axis=1) - out4 = paddle.argmax(input=x, axis=2) - out5 = paddle.argmax(input=x, axis=2, keepdims=True) - print(out1.numpy()) - # [[2 3 1] - # [0 3 1]] - print(out2.numpy()) - # [[0 0 0 0] - # [1 1 1 1] - # [0 0 0 1]] - print(out3.numpy()) - # [[2 2 0 1] - # [0 1 1 1]] - print(out4.numpy()) - # [[2 3 1] - # [0 3 1]] - print(out5.numpy()) - #array([[[2], - # [3], - # [1]], - # [[0], - # [3], - # [1]]]) + import paddle.fluid as fluid + import numpy as np + + in1 = np.array([[[5,8,9,5], + [0,0,1,7], + [6,9,2,4]], + [[5,2,4,2], + [4,7,7,9], + [1,7,0,6]]]) + with fluid.dygraph.guard(): + x = fluid.dygraph.to_variable(in1) + out1 = fluid.layers.argmax(x=x, axis=-1) + out2 = fluid.layers.argmax(x=x, axis=0) + out3 = fluid.layers.argmax(x=x, axis=1) + out4 = fluid.layers.argmax(x=x, axis=2) + print(out1.numpy()) + # [[2 3 1] + # [0 3 1]] + print(out2.numpy()) + # [[0 0 0 0] + # [1 1 1 1] + # [0 0 0 1]] + print(out3.numpy()) + # [[2 2 0 1] + # [0 1 1 1]] + print(out4.numpy()) + # [[2 3 1] + # [0 3 1]] diff --git a/doc/paddle/api/paddle/tensor/search/where_cn.rst b/doc/paddle/api/paddle/tensor/search/where_cn.rst index bcfabfab3..d483b8fa9 100644 --- a/doc/paddle/api/paddle/tensor/search/where_cn.rst +++ b/doc/paddle/api/paddle/tensor/search/where_cn.rst @@ -1,53 +1,42 @@ -.. _cn_api_tensor_where: +.. _cn_api_fluid_layers_where: where ------------------------------- -.. py:function:: paddle.where(condition, x, y, name=None) +.. 
py:function:: paddle.fluid.layers.where(condition) -:alias_main: paddle.where -:alias: paddle.where,paddle.tensor.where,paddle.tensor.search.where -:update_api: paddle.fluid.layers.cond -该OP返回一个根据输入 ``condition``, 选择 ``x`` 或 ``y`` 的元素组成的多维 ``Tensor`` : - -.. math:: - Out_i = - \left\{ - \begin{aligned} - &X_i, & & if \ cond_i \ is \ True \\ - &Y_i, & & if \ cond_i \ is \ False \\ - \end{aligned} - \right. - +该OP计算输入元素中为True的元素在输入中的坐标(index)。 + 参数: - - **condition** (Variable)- 选择 ``x`` 或 ``y`` 元素的条件 。 - - **x** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 - - **y** (Variable)- 多维 ``Tensor`` ,数据类型为 ``float32`` 或 ``float64`` 或 ``int32`` 或 ``int64`` 。 - - **name** (str,可选)- 具体用法请参见 :ref:`api_guide_Name` ,一般无需设置,默认值为None。 - -返回:数据类型与 ``x`` 相同的 ``Tensor`` 。 + - **condition** (Variable)– 输入秩至少为1的多维Tensor,数据类型是bool类型。 -返回类型:Variable。 +返回:输出condition元素为True的坐标(index),将所有的坐标(index)组成一个2-D的Tensor。 - -**代码示例:** +返回类型:Variable,数据类型是int64。 + +**代码示例**: .. code-block:: python - import paddle - import numpy as np - import paddle.fluid as fluid + import paddle.fluid as fluid + import paddle.fluid.layers as layers + import numpy as np + # tensor 为 [True, False, True] + condition = layers.assign(np.array([1, 0, 1], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[0], [2]] + + # tensor 为 [[True, False], [False, True]] + condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[0, 0], [1, 1]] - x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") - y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32") + # tensor 为 [False, False, False] + condition = layers.assign(np.array([0, 0, 0], dtype='int32')) + condition = layers.cast(condition, 'bool') + out = layers.where(condition) # [[]] - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_i) - y = fluid.dygraph.to_variable(y_i) - out = 
paddle.where(x>1, x, y) - print(out.numpy()) - #out: [1.0, 1.0, 3.2, 1.2] diff --git a/doc/paddle/api/paddle/framework/to_variable_cn.rst b/doc/paddle/api/paddle/to_variable_cn.rst similarity index 100% rename from doc/paddle/api/paddle/framework/to_variable_cn.rst rename to doc/paddle/api/paddle/to_variable_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/topk_cn.rst b/doc/paddle/api/paddle/topk_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/topk_cn.rst rename to doc/paddle/api/paddle/topk_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/transpose_cn.rst b/doc/paddle/api/paddle/transpose_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/transpose_cn.rst rename to doc/paddle/api/paddle/transpose_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/unique_cn.rst b/doc/paddle/api/paddle/unique_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/unique_cn.rst rename to doc/paddle/api/paddle/unique_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/unique_with_counts_cn.rst b/doc/paddle/api/paddle/unique_with_counts_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/unique_with_counts_cn.rst rename to doc/paddle/api/paddle/unique_with_counts_cn.rst diff --git a/doc/paddle/api/paddle/fluid/layers/unstack_cn.rst b/doc/paddle/api/paddle/unstack_cn.rst similarity index 100% rename from doc/paddle/api/paddle/fluid/layers/unstack_cn.rst rename to doc/paddle/api/paddle/unstack_cn.rst -- GitLab