Unverified commit a73e1f68 authored by SunGaofeng, committed by GitHub

fix document of 11 APIs (#20278)

* modify document of 11 APIs
test=develop
test=document_fix

* fix dtype to data type and description of name parameter
Parent 7fb958b2
......@@ -174,7 +174,7 @@ paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, ke
paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '39fbc5437be389f6c0c769f82fc1fba2'))
paddle.fluid.layers.dropout (ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer')), ('document', '392dd4bad607fd853f71fec71801044f'))
paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '78cf3a7323d1a7697658242e13f63759'))
paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'input_length', 'padding_value', 'name'], varargs=None, keywords=None, defaults=(None, 0, None)), ('document', '9abb7bb8d267e017620a39a146dc47ea'))
paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'input_length', 'padding_value', 'name'], varargs=None, keywords=None, defaults=(None, 0, None)), ('document', '31e0cbec2898efae95853034adadfe2b'))
paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(True, None, None, None)), ('document', '77cbfb28cd2fc589f589c7013c5086cd'))
paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', 'c1df110ea65998984f564c5c10abc54a'))
paddle.fluid.layers.matmul (ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', '3720b4a386585094435993deb028b592'))
......@@ -202,12 +202,12 @@ paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=N
paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', '74498d37dd622ac472cb36887fce09ea'))
paddle.fluid.layers.lod_append (ArgSpec(args=['x', 'level'], varargs=None, keywords=None, defaults=None), ('document', '37663c7c179e920838a250ea0e28d909'))
paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', 'fa565b65fb98d3ca82361c79f41b06b2'))
paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '36b6e58678956585e5b30aa3de123a60'))
paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '95aa1972983f30fe9b5a3713e523e20f'))
paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '46b3ada86dd2c79042dca90a55e08f66'))
paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '89aa122a50dc20ee116ae49d66854d20'))
paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '214f1dfbe95a628600bbe99e836319cf'))
paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', '6fc9bae94518bbf3e1a9e479f38f6537'))
paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '3885fd76e122ac0563fa8369bcab7363'))
paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '7e8e4bf1f0f8612961ed113e8af8f0c5'))
paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-05, None)), ('document', '08d94daffbea3935178810bdc1633f07'))
paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1, 'NCHW')), ('document', 'd29d829607b5ff12924197a3ba296c89'))
paddle.fluid.layers.image_resize_short (ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', 'bd97ebfe4bdf5110a5fcb8ecb626a447'))
paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1, 'NCHW')), ('document', '44da7890c8a362a83a1c0902a1dc1e4d'))
......@@ -224,7 +224,7 @@ paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], va
paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0942c174f4f6fb274976d4357356f6a2'))
paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'f93c61f5b0bf933cd425a64dca2c4fdd'))
paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '02f668664e3bfc4df6c00d7363467140'))
paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'ba3621917d5beffd3d022b88fbf6dc46'))
paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'd460aaf35afbbeb9beea4789aa6e4343'))
paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '8eb36596bb43d7a907d3397c7aedbdb3'))
paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '6fc86ed23b420c8a0f6c043563cf3937'))
......@@ -272,7 +272,7 @@ paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs
paddle.fluid.layers.logical_or (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '15adbc561618b7db69671e02009bea67'))
paddle.fluid.layers.logical_xor (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '77ccf37b710c507dd97e03f08ce8bb29'))
paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6e2fe8a322ec69811f6507d22acf8f9f'))
paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0ce33756573c572da67302499455dbcd'))
paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4ad0d96a149f023cb72199ded4ce6e9d'))
paddle.fluid.layers.clip_by_norm (ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a5f4917fda557ceb834168cdbec6d51b'))
paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '597257fb94d0597c404a6a5c91ab5258'))
paddle.fluid.layers.mul (ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', '784b7e36cea88493f9e37a41b10fbf4d'))
......@@ -294,7 +294,7 @@ paddle.fluid.layers.lstm (ArgSpec(args=['input', 'init_h', 'init_c', 'max_len',
paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '276a1213dd431228cefa33c3146df34a'))
paddle.fluid.layers.temporal_shift (ArgSpec(args=['x', 'seg_num', 'shift_ratio', 'name'], varargs=None, keywords=None, defaults=(0.25, None)), ('document', 'd5945431cdcae3cda21914db5bbf383e'))
paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)), ('document', '8404e472ac12b4a30a505d3d3a3e5fdb'))
paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '42d5155374f69786300d90d751956998'))
paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9bf0cc6b0717010b8ceec5dc2541d566'))
paddle.fluid.layers.prroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(1.0, 1, 1, None)), ('document', '454c7ea8c73313dd41513929d7526303'))
paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', 'b0e07aa41caae04b07a8e8217cc96020'))
paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '9d93ee81f7a3e526d68bb280bc695d6c'))
......@@ -306,7 +306,7 @@ paddle.fluid.layers.continuous_value_model (ArgSpec(args=['input', 'cvm', 'use_c
paddle.fluid.layers.where (ArgSpec(args=['condition'], varargs=None, keywords=None, defaults=None), ('document', '68810eedf448f2cb3abd46518dd46c39'))
paddle.fluid.layers.sign (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '9f19288d9a8dabcfd0bbb4fc032fa521'))
paddle.fluid.layers.deformable_conv (ArgSpec(args=['input', 'offset', 'mask', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'deformable_groups', 'im2col_step', 'param_attr', 'bias_attr', 'modulated', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, None, None, True, None)), ('document', '3e090f9e90b9c24d07348243bf137b56'))
paddle.fluid.layers.unfold (ArgSpec(args=['x', 'kernel_sizes', 'strides', 'paddings', 'dilations', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None)), ('document', '3f884662ad443d9ecc2b3734b4f61ad6'))
paddle.fluid.layers.unfold (ArgSpec(args=['x', 'kernel_sizes', 'strides', 'paddings', 'dilations', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None)), ('document', 'f03cebb8a2ad0f128b8e86ccf399a0a3'))
paddle.fluid.layers.deformable_roi_pooling (ArgSpec(args=['input', 'rois', 'trans', 'no_trans', 'spatial_scale', 'group_size', 'pooled_height', 'pooled_width', 'part_size', 'sample_per_part', 'trans_std', 'position_sensitive', 'name'], varargs=None, keywords=None, defaults=(False, 1.0, [1, 1], 1, 1, None, 1, 0.1, False, None)), ('document', 'e0e7bf35da2287efb015546f1b8350df'))
paddle.fluid.layers.filter_by_instag (ArgSpec(args=['ins', 'ins_tag', 'filter_tag', 'is_lod'], varargs=None, keywords=None, defaults=None), ('document', '7703a2088af8de4128b143ff1164ca4a'))
paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards', 'shard_id', 'ignore_value'], varargs=None, keywords=None, defaults=(-1,)), ('document', '3c6b30e9cd57b38d4a5fa1ade887f779'))
......@@ -422,7 +422,7 @@ paddle.fluid.layers.rpn_target_assign (ArgSpec(args=['bbox_pred', 'cls_logits',
paddle.fluid.layers.retinanet_target_assign (ArgSpec(args=['bbox_pred', 'cls_logits', 'anchor_box', 'anchor_var', 'gt_boxes', 'gt_labels', 'is_crowd', 'im_info', 'num_classes', 'positive_overlap', 'negative_overlap'], varargs=None, keywords=None, defaults=(1, 0.5, 0.4)), ('document', '543b2a40641260e745a76b1f7a25fb2a'))
paddle.fluid.layers.sigmoid_focal_loss (ArgSpec(args=['x', 'label', 'fg_num', 'gamma', 'alpha'], varargs=None, keywords=None, defaults=(2, 0.25)), ('document', '4702891755596c8853aaeb874a5fdb46'))
paddle.fluid.layers.anchor_generator (ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None)), ('document', 'a7778d4f557c60dca52321673667690d'))
paddle.fluid.layers.roi_perspective_transform (ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1.0,)), ('document', 'a82016342789ba9d85737e405f824ff1'))
paddle.fluid.layers.roi_perspective_transform (ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'b007f545ad41e66b814203bdb76516c6'))
paddle.fluid.layers.generate_proposal_labels (ArgSpec(args=['rpn_rois', 'gt_classes', 'is_crowd', 'gt_boxes', 'im_info', 'batch_size_per_im', 'fg_fraction', 'fg_thresh', 'bg_thresh_hi', 'bg_thresh_lo', 'bbox_reg_weights', 'class_nums', 'use_random', 'is_cls_agnostic', 'is_cascade_rcnn'], varargs=None, keywords=None, defaults=(256, 0.25, 0.25, 0.5, 0.0, [0.1, 0.1, 0.2, 0.2], None, True, False, False)), ('document', 'f2342042127b536a0a16390f149f1bba'))
paddle.fluid.layers.generate_proposals (ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None)), ('document', '5cba014b41610431f8949e2d7336f1cc'))
paddle.fluid.layers.generate_mask_labels (ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None), ('document', 'b319b10ddaf17fb4ddf03518685a17ef'))
......@@ -924,8 +924,8 @@ paddle.fluid.transpiler.RoundRobin.dispatch (ArgSpec(args=['self', 'varlist'], v
paddle.fluid.transpiler.RoundRobin.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.transpiler.DistributeTranspilerConfig ('paddle.fluid.transpiler.distribute_transpiler.DistributeTranspilerConfig', ('document', 'beac6f89fe97eb8c66a25de5a09c56d2'))
paddle.fluid.transpiler.DistributeTranspilerConfig.__init__ (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.nets.simple_img_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', '13f01ff80e8dfbd3427d90cf49bc62eb'))
paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', 'd6a1e527b53f5cc15594fee307dfc5cf'))
paddle.fluid.nets.simple_img_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', '5e89c978199c4ecce2b26d5fed1ec52b'))
paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', 'b2d435f782ac8ea3ca480b8d24e7f5b4'))
paddle.fluid.nets.glu (ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)), ('document', 'b87bacfc70dd3477ed25ef14aa01389a'))
paddle.fluid.nets.scaled_dot_product_attention (ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)), ('document', 'b1a07a0000eb9103e3a143ca8c13de5b'))
paddle.fluid.nets.img_conv_group (ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', '6033b78da39b8b0ed302fbb0f67da502'))
......
......@@ -41,21 +41,22 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor)The input of clip op."
"The number of dimensions must be between [1, 9].");
AddOutput("Out", "(Tensor)The output of clip op with shape as input(X)");
AddAttr<AttrType>(
"min", "(float)Minimum value, under which element is replaced by min.");
AddAttr<AttrType>(
"max", "(float)Maximum value, above which element is replaced by max");
"Tensor, the input of clip op, data type should be float32 or "
"float64.");
AddOutput(
"Out",
"Tensor, the clipped tensor, with the same shape and data type as "
"input(x)");
AddAttr<AttrType>("min", "float number, the minimum value to clip by.");
AddAttr<AttrType>("max", "float number, the maximum value to clip by.");
AddComment(R"DOC(
Clip Operator.
The clip operator limits the value of given input within an interval. The
interval is specified with arguments 'min' and 'max':
The clip operator limits the value of the given input to the interval [min, max],
as shown in the following equation:
$$
Out = \min(\max(X, min), max)
Out = \min(\max(x, min), max)
$$
)DOC");
......@@ -106,6 +107,8 @@ REGISTER_OPERATOR(clip, ops::ClipOp, ops::ClipOpMaker<float>,
ops::ClipGradOpDescMaker, ops::ClipInplaceInferer);
REGISTER_OPERATOR(clip_grad, ops::ClipOpGrad, ops::ClipGradInplaceInferer);
REGISTER_OP_CPU_KERNEL(
clip, ops::ClipKernel<paddle::platform::CPUDeviceContext, float>);
clip, ops::ClipKernel<paddle::platform::CPUDeviceContext, float>,
ops::ClipKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
clip_grad, ops::ClipGradKernel<paddle::platform::CPUDeviceContext, float>);
clip_grad, ops::ClipGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::ClipGradKernel<paddle::platform::CPUDeviceContext, double>);
......@@ -16,6 +16,8 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
clip, ops::ClipKernel<paddle::platform::CUDADeviceContext, float>);
clip, ops::ClipKernel<paddle::platform::CUDADeviceContext, float>,
ops::ClipKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
clip_grad, ops::ClipGradKernel<paddle::platform::CUDADeviceContext, float>);
clip_grad, ops::ClipGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::ClipGradKernel<paddle::platform::CUDADeviceContext, double>);
......@@ -207,6 +207,8 @@ REGISTER_OPERATOR(crop, ops::CropOp, ops::CropOpMaker,
ops::CropGradOpDescMaker);
REGISTER_OPERATOR(crop_grad, ops::CropOpGrad);
REGISTER_OP_CPU_KERNEL(
crop, ops::CropKernel<paddle::platform::CPUDeviceContext, float>);
crop, ops::CropKernel<paddle::platform::CPUDeviceContext, float>,
ops::CropKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
crop_grad, ops::CropGradKernel<paddle::platform::CPUDeviceContext, float>);
crop_grad, ops::CropGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::CropGradKernel<paddle::platform::CPUDeviceContext, double>);
......@@ -15,6 +15,8 @@ limitations under the License. */
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
crop, ops::CropKernel<paddle::platform::CUDADeviceContext, float>);
crop, ops::CropKernel<paddle::platform::CUDADeviceContext, float>,
ops::CropKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
crop_grad, ops::CropGradKernel<paddle::platform::CUDADeviceContext, float>);
crop_grad, ops::CropGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CropGradKernel<paddle::platform::CUDADeviceContext, double>);
......@@ -25,14 +25,14 @@ class PSROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor), "
"Tensor, "
"the input of PSROIPoolOp. "
"The format of input tensor is NCHW. Where N is the batch size, "
"C is the number of input channels, "
"H is the height of the input feature map, and "
"W is the width.");
"W is the width. The data type can be float32 or float64");
AddInput("ROIs",
"(LoDTensor), "
"LoDTensor, "
"ROIs (Regions of Interest) to pool over. "
"should be a 2-D LoDTensor of shape (num_rois, 4) "
"given as [(x1, y1, x2, y2), ...]. "
......@@ -40,9 +40,10 @@ class PSROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
"(x2, y2) is the bottom right coordinates. "
"The roi batch index can be calculated from LoD.");
AddOutput("Out",
"(Tensor), "
"Tensor, "
"the output of PSROIPoolOp is a 4-D Tensor with shape "
"(num_rois, output_channels, pooled_h, pooled_w).");
"(num_rois, output_channels, pooled_h, pooled_w). "
"The data type is the same as `x` ");
AddAttr<int>(
"output_channels",
"(int), "
......@@ -64,7 +65,7 @@ class PSROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
"the pooled output width.")
.SetDefault(1);
AddComment(R"Doc(
**PSROIPool Operator**
**PSROIPool Operator,** `rois` **of this op should be a LoDTensor**
Position sensitive region of interest pooling (also known as PSROIPooling) is to perform
position-sensitive average pooling on regions of interest specified by input, takes as
......
......@@ -2226,44 +2226,55 @@ def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0):
spatial_scale=1.0,
name=None):
"""
ROI perspective transform op.
**The** `rois` **of this op should be a LoDTensor.**
Args:
input (Variable): The input of ROIPerspectiveTransformOp. The format of
ROI perspective transform op applies a perspective transform to map each ROI into a
rectangular region. Perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature.
rois (Variable): ROIs (Regions of Interest) to be transformed. It should be
a 2-D LoDTensor of shape (num_rois, 8). Given as
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates.
transformed_height (integer): The height of transformed output.
transformed_width (integer): The width of transformed output.
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
tuple: A tuple with three Variables. (out, mask, transform_matrix)
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w).
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w).
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9).
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[256, 28, 28], dtype='float32')
rois = fluid.layers.data(name='rois', shape=[8], lod_level=1, dtype='float32')
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
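# Hedged note (not part of the original example): following the Returns
# section above, `out` is expected to have shape (num_rois, 256, 7, 7),
# `mask` shape (num_rois, 1, 7, 7), and `transform_matrix` shape
# (num_rois, 9), where num_rois is the number of rows in `rois`.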
"""
helper = LayerHelper('roi_perspective_transform', **locals())
......
......@@ -6745,19 +6745,23 @@ def ctc_greedy_decoder(input,
padding_value=0,
name=None):
"""
This op is used to decode sequences by greedy policy by below steps:
This op is used to decode sequences by greedy policy by the following steps:
1. Get the indexes of max value for each row in input. a.k.a.
1. Get the indexes of maximum value for each row in input. a.k.a.
numpy.argmax(input, axis=0).
2. For each sequence in result of step1, merge repeated tokens between two
blanks and delete all blanks.
This op is implemented in two modes: lod and padding; either of them can be used.
The input can be either a LoDTensor or a Tensor, corresponding to the lod and
padding modes respectively.
A simple example as below:
.. code-block:: text
Given:
for lod mode:
(1) for lod mode:
input.data = [[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
......@@ -6786,7 +6790,7 @@ def ctc_greedy_decoder(input,
output.lod = [[2, 1]]
for padding mode:
(2) for padding mode:
input.data = [[[0.6, 0.1, 0.3, 0.1],
[0.3, 0.2, 0.4, 0.1],
......@@ -6813,45 +6817,54 @@ def ctc_greedy_decoder(input,
output_length.data = [[2], [1]]
Parameters:
Args:
input(Variable): (LoDTensor<float>), the probabilities of
variable-length sequences. When in lod mode, it is a 2-D Tensor with
LoD information. It's shape is [Lp, num_classes + 1]
input(Variable): the probabilities of variable-length sequences. When in lod mode,
it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1]
where Lp is the sum of all input sequences' length and
num_classes is the true number of classes. When in padding mode,
it is a 3-D Tensor with padding, and its shape is [batch_size, N, num_classes + 1].
(not including the blank label).
(not including the blank label). The data type can be float32 or float64.
blank(int): the blank label index of Connectionist Temporal
Classification (CTC) loss, which is in thehalf-opened
Classification (CTC) loss, which is in the half-opened
interval [0, num_classes + 1).
input_length(Variable, optional): (LoDTensor<int>), shape is [batch_size, 1], when in lod mode, input_length
is None.
input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
It is used for padding mode. In lod mode, input_length is None.
padding_value(int): padding value.
name (str, optional): The name of this layer. It is optional.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
output(Variable): For lod mode, CTC greedy decode result which is a 2-D tensor with shape [Lp, 1]. \
'Lp' is the sum if all output sequences' length. If all the sequences \
in result were empty, the result LoDTensor will be [-1] with \
LoD [[]] and dims [1, 1]. For padding mode, CTC greedy decode result is a 2-D tensor \
with shape [batch_size, N], output length's shape is [batch_size, 1] which is length \
of every sequence in output.
output_length(Variable, optional): length of each sequence of output for padding mode.
For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
in result were empty, the result LoDTensor will be [-1] with empty \
LoD [[]].
For padding mode, returns a tuple of (output, output_length), as described below:
output, 2-D Tensor, shape is [batch_size, N], data type is int64.
output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
each sequence of output for padding mode.
Return type:
For lod mode: Variable
For padding mode: tuple of two Variables (output, output_length).
Examples:
.. code-block:: python
# for lod mode
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[8], dtype='float32')
x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
# for padding mode
x_pad = fluid.layers.data(name='x_pad', shape=[4,8], dtype='float32')
x_pad_len = fluid.layers.data(name='x_pad_len', shape=[1], dtype='int64')
x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
input_length=x_pad_len)
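# A minimal NumPy sketch of the two decoding steps described above
# (per-step argmax, then merging repeated tokens and dropping the blank).
# It only illustrates the documented semantics and is not the Paddle kernel;
# greedy_decode and probs are made-up names for this illustration.
import numpy as np

def greedy_decode(probs, blank=0):
    ids = np.argmax(probs, axis=1)            # step 1: best class per time step
    merged = [t for i, t in enumerate(ids)    # step 2a: merge repeated tokens
              if i == 0 or t != ids[i - 1]]
    return [t for t in merged if t != blank]  # step 2b: drop the blank label

probs = np.array([[0.6, 0.1, 0.3, 0.1],
                  [0.3, 0.2, 0.4, 0.1],
                  [0.1, 0.5, 0.1, 0.3],
                  [0.5, 0.1, 0.3, 0.1]], dtype='float32')
print(greedy_decode(probs, blank=0))  # [2, 1]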
......@@ -8778,13 +8791,13 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
def pad(x, paddings, pad_value=0., name=None):
"""
Pads a tensor with a constant value given by :attr:`pad_value`, and the
padded width is specified by :attr:`paddings`.
This op will pad a tensor with a constant value given by :attr:`pad_value`, and the
padded shape is specified by :attr:`paddings`.
Specifically, the number of values padded before the contents of :attr:`x`
in dimension :attr:`i` is indicated by :attr:`paddings[2i]`, and the number
of values padded after the contents of :attr:`x` in dimension :attr:`i` is
indicated by :attr:`paddings[2i+1]`.
Specifically, the number of values padded before the elements of :attr:`x`
in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number
of values padded after the elements of :attr:`x` in dimension :attr:`i` is
indicated by :attr:`paddings[2*i+1]`.
See below for an example.
......@@ -8804,24 +8817,29 @@ def pad(x, paddings, pad_value=0., name=None):
[0, 0, 0, 0, 0]]
Args:
x (Variable): The input tensor variable.
x (Variable): Tensor, data type is float32.
paddings (list): A list of integers. Its elements specify the padded
width before and after for each dimension in turn.
The length of :attr:paddings must be
width before and after each dimension in turn.
The length of :attr:`paddings` must be equal to
:math:`rank(x) \\times 2`.
pad_value (float): The constant value used to pad.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The padded tensor variable.
The padded tensor, with the same data type and rank as :attr:`x`
Return Type:
Variable
Examples:
.. code-block:: python
# x is a rank 2 tensor variable.
# x is a rank 2 tensor variable with shape [100, 224].
# out will be a tensor of shape [101, 227]
import paddle.fluid as fluid
x = fluid.layers.data(name='data', shape=[224], dtype='float32')
x = fluid.data(name='data', shape=[100, 224], dtype='float32')
out = fluid.layers.pad(
x=x, paddings=[0, 1, 1, 2], pad_value=0.)
"""
......@@ -8839,11 +8857,10 @@ def pad(x, paddings, pad_value=0., name=None):
def pad_constant_like(x, y, pad_value=0., name=None):
"""
Pad input(Y) with :attr:`pad_value`, the number of values padded to
Pad :attr:`y` with :attr:`pad_value`, the number of values padded to
the edges of each axis is specified by the difference of the shape
of X and Y. ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n))
unique pad widths for each axis. The input should be a k-D
tensor(k > 0 and k < 7).
of :attr:`x` and :attr:`y` : ((0, shape_x_0 - shape_y_0), ..., (0, shape_x_n - shape_y_n))
specifies the padding width for each axis. The input should be a k-D tensor (k > 0 and k < 7).
See below for an example.
......@@ -8887,14 +8904,19 @@ def pad_constant_like(x, y, pad_value=0., name=None):
Out.shape = (2, 3, 2, 3)
Args:
x (Variable): The input tensor variable.
y (Variable): The input tensor variable.
x (Variable): Tensor, its shape specifies the shape of the output.
y (Variable): Tensor, its rank is the same as :attr:`x`, and for each dimension :math:`i` ,
:math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64.
pad_value (float): The constant value used to pad.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The padded tensor variable.
The padded tensor, with the same shape as :attr:`x` and the same data type as :attr:`y`
Return Type:
Variable
Examples:
.. code-block:: python
......@@ -8902,8 +8924,8 @@ def pad_constant_like(x, y, pad_value=0., name=None):
# x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
# y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[2,3,2,3], dtype='float32')
y = fluid.layers.data(name='y', shape=[1,3,1,3], dtype='float32')
x = fluid.data(name='x', shape=[2,3,2,3], dtype='float32')
y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
# out is a rank 4 tensor variable, and out.shape = [2, 3, 2, 3]
"""
......@@ -9124,11 +9146,12 @@ def roi_align(input,
return align_out
def dice_loss(input, label, epsilon=0.00001):
def dice_loss(input, label, epsilon=0.00001, name=None):
"""
Dice loss for comparing the similarity of two batch of data,
usually is used for binary image segmentation i.e. labels are binary.
The dice loss can be defined as below equation:
Dice loss for comparing the similarity between the input predictions and the label.
This implementation is for binary classification, where the input is the sigmoid
prediction of each pixel, and is usually used for segmentation tasks. The dice loss can
be defined as the following equation:
.. math::
......@@ -9137,25 +9160,31 @@ def dice_loss(input, label, epsilon=0.00001):
&= \\frac{(union\_area - intersection\_area)}{total\_area}
Args:
input (Variable): The predictions with rank>=2. The first dimension is batch size,
and the last dimension is class number.
label (Variable): The groud truth with the same rank with input. The first dimension
is batch size, and the last dimension is 1.
Parameters:
input (Variable): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is
the batch_size, :math:`N_D` is 1. It is usually the output predictions of sigmoid activation.
The data type can be float32 or float64.
label (Variable): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_D]`,
where :math:`N_1` is the batch_size, :math:`N_D` is 1. The data type can be float32 or float64.
epsilon (float): The epsilon will be added to the numerator and denominator.
If both input and label are empty, it makes sure dice is 1.
Default: 0.00001
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
dice_loss (Variable): The dice loss with shape [1].
The dice loss with shape [1], data type is the same as `input` .
Return Type:
Variable
Examples:
Example:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='data', shape = [3, 224, 224, 2], dtype='float32')
label = fluid.layers.data(name='label', shape=[3, 224, 224, 1], dtype='float32')
predictions = fluid.layers.softmax(x)
x = fluid.data(name='data', shape = [3, 224, 224, 1], dtype='float32')
label = fluid.data(name='label', shape=[3, 224, 224, 1], dtype='float32')
predictions = fluid.layers.sigmoid(x)
loss = fluid.layers.dice_loss(input=predictions, label=label)
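# A hedged NumPy sketch of the dice loss equation above (an illustration of
# the usual formulation, not the exact Paddle kernel): reduce over every axis
# except the batch axis, then average the per-sample losses. dice_loss_np is
# a made-up name for this illustration.
import numpy as np
def dice_loss_np(pred, gt, epsilon=1e-5):
    axes = tuple(range(1, pred.ndim))
    intersection = np.sum(pred * gt, axis=axes)
    total = np.sum(pred, axis=axes) + np.sum(gt, axis=axes)
    return np.mean(1.0 - 2.0 * intersection / (total + epsilon))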
"""
label = one_hot(label, depth=input.shape[-1])
......@@ -10587,10 +10616,8 @@ def crop(x, shape=None, offsets=None, name=None):
"""
Crop input into output, as specified by offsets and shape.
**Warning:** THIS FUNCTION IS DEPRECATED. It will be removed in a future version.
Instructions for updating: Use `fluid.layers.crop_tensor
<https://www.paddlepaddle.org.cn/documentation/docs/en/api/layers/nn.html#crop_tensor>`_
instead.
**Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.
.. code-block:: text
......@@ -10620,26 +10647,30 @@ def crop(x, shape=None, offsets=None, name=None):
Out = [[1, 2, 5],
[3, 4, 6]].
Args:
x (Variable): The input tensor variable.
shape (Variable|list/tuple of integer): The output shape is specified
by `shape`, which can be a Variable or a list/tuple of integer.
If a tensor Variable, it's rank must be the same as `x`. This way
Parameters:
x (Variable): Tensor, data type can be float32 or float64.
shape (Variable|list/tuple of integers): The output shape is specified
by `shape`, which can be a Tensor or a list/tuple of integers.
If it is a Tensor, its rank must be the same as `x` , and only
its shape will be used while its values are ignored. This way
is suitable for the case that the output shape may be changed each
iteration. If a list/tuple of integer, it's length must be the same
iteration. If it is a list/tuple of integers, its length must be the same
as the rank of `x`
offsets (Variable|list/tuple of integer|None): Specifies the cropping
offsets at each dimension. It can be a Variable or a list/tuple
of integers. If a tensor Variable, it's rank must be the same as `x`.
offsets (Variable|list/tuple of integers|None): Specifies the cropping
offsets at each dimension. It can be a Tensor or a list/tuple
of integers. If it is a Tensor, its rank must be the same as `x`.
This way is suitable for the case that the offsets may be changed
each iteration. If a list/tuple of integer, it's length must be the
same as the rank of `x`. If None, the offsets are 0 at each
dimension.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
each iteration. If it is a list/tuple of integers, its length must be the
same as the rank of `x`. If None, the offsets are 0 at each dimension.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name` . Usually name is no need to set and
None by default.
Returns:
Variable: The cropped tensor variable.
The cropped Tensor, which has the same rank and data type as `x`.
Return Type:
Variable
Raises:
ValueError: If shape is not a list, tuple or Variable.
......@@ -10649,13 +10680,13 @@ def crop(x, shape=None, offsets=None, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3, 5], dtype="float32")
y = fluid.layers.data(name="y", shape=[2, 3], dtype="float32")
x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
crop = fluid.layers.crop(x, shape=y)
# or
z = fluid.layers.data(name="z", shape=[3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[-1, 2, 3])
z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
crop = fluid.layers.crop(z, shape=[2, 2, 3])
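# A hedged NumPy sketch of what crop computes for explicit offsets and shape
# (illustration only, not the Paddle kernel): take the slice that starts at
# `offsets` and spans `shape` elements along every axis. x_np, offsets and
# crop_shape are made-up names for this illustration.
import numpy as np
x_np = np.arange(3 * 3 * 5, dtype='float32').reshape(3, 3, 5)
offsets, crop_shape = (0, 1, 1), (2, 2, 3)
out_np = x_np[tuple(slice(o, o + s) for o, s in zip(offsets, crop_shape))]
print(out_np.shape)  # (2, 2, 3)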
"""
helper = LayerHelper('crop', **locals())
......@@ -13483,18 +13514,23 @@ def clip(x, min, max, name=None):
Args:
x(${x_type}): ${x_comment}
min(${min_type}): ${min_comment}
max(${max_type}): ${max_comment}
name(basestring|None): Name of the output.
min(float): ${min_comment}
max(float): ${max_comment}
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
out(${out_type}): ${out_comment}
${out_comment}
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(
input = fluid.data(
name='data', shape=[1], dtype='float32')
reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
"""
......@@ -14898,28 +14934,33 @@ def psroi_pool(input,
"""
${comment}
Args:
Parameters:
input (Variable): ${x_comment}
rois (Variable): ROIs (Regions of Interest) to pool over.It should be
rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over. It should be
a 2-D LoDTensor of shape (num_rois, 4), the lod level
is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
the top left coordinates, and (x2, y2) is the bottom
right coordinates.
output_channels (integer): ${output_channels_comment}
right coordinates. The data type is the same as `input`
output_channels (int): ${output_channels_comment}
spatial_scale (float): ${spatial_scale_comment} Default: 1.0
pooled_height (integer): ${pooled_height_comment} Default: 1
pooled_width (integer): ${pooled_width_comment} Default: 1
name (str, default None): The name of this layer.
pooled_height (int): ${pooled_height_comment} Default: 1
pooled_width (int): ${pooled_width_comment} Default: 1
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: ${out_comment}.
${out_comment}.
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[490, 28, 28], dtype='float32')
rois = fluid.layers.data(name='rois', shape=[4], lod_level=1, dtype='float32')
x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
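# Hedged note (not part of the original example): the input channel count is
# expected to equal output_channels * pooled_height * pooled_width, which is
# why x has 490 = 10 * 7 * 7 channels above, and pool_out is expected to have
# shape (num_rois, 10, 7, 7) per the output description of the op.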
"""
helper = LayerHelper('psroi_pool', **locals())
......@@ -15739,12 +15780,12 @@ def deformable_conv(input,
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
"""
This function returns a col buffer of sliding local blocks of input x, also known
This op returns a col buffer of sliding local blocks of input x, also known
as im2col for batched 2D image tensors. For each block under the convolution filter,
all elements will be rearranged as a column. While the convolution filter is sliding over
the input feature map, a series of such columns will be formed.
For each input :math:`X` with shape [N, C, H, W], the output shape [N, Cout, Lout]
For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
can be calculated as following.
.. math::
......@@ -15762,8 +15803,9 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
Lout &= hout \\times wout
Args:
x(Varaible): The input tensor of format [N, C, H, W].
Parameters:
x(Variable): 4-D Tensor, input tensor of format [N, C, H, W],
data type can be float32 or float64.
kernel_sizes(int|list): The size of convolution kernel, should be [k_h, k_w]
or an integer k treated as [k, k].
strides(int|list): The strides, should be [stride_h, stride_w]
......@@ -15779,17 +15821,27 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
dilations(int|list): the dilations of convolution kernel, should be
[dilation_h, dilation_w], or an integer dilation treated as
[dilation, dilation]. By default, it will be [1, 1].
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The tensor variable corresponding to the sliding local blocks. The output shape is [N, Cout, Lout] as decribled above. Cout is the total number of values within each block, and Lout is the total number of such blocks.
The tensor variable corresponding to the sliding local blocks.
The output shape is [N, Cout, Lout] as described above.
Cout is the total number of values within each block,
and Lout is the total number of such blocks.
The data type of output is the same as the input :math:`x`
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name = 'data', shape = [3, 224, 224], dtype = 'float32')
x = fluid.data(name = 'data', shape = [100, 3, 224, 224], dtype = 'float32')
y = fluid.layers.unfold(x, [3, 3], 1, 1, 1)
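# A hedged shape check for the example above, following the hout/wout/Lout
# formulas in the description (illustration only):
#   hout = (224 + 1 + 1 - (1 * (3 - 1) + 1)) // 1 + 1 = 224, and wout = 224
#   Cout = 3 * 3 * 3 = 27,  Lout = 224 * 224 = 50176
# so y is expected to have shape [100, 27, 50176].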
"""
......
......@@ -42,25 +42,24 @@ def simple_img_conv_pool(input,
act=None,
use_cudnn=True):
"""
The simple_img_conv_pool is composed with one Convolution2d and one Pool2d.
The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` .
Args:
input (Variable): The input image with [N, C, H, W] format.
num_filters(int): The number of filter. It is as same as the output
feature channel.
input (Variable): 4-D Tensor, shape is [N, C, H, W], data type can be float32 or float64.
num_filters(int): The number of filters. It is the same as the output channels.
filter_size (int|list|tuple): The filter size. If filter_size is a list or
tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise,
the filter_size_H = filter_size_W = filter_size.
pool_size (int|list|tuple): The pooling size of Pool2d layer. If pool_size
pool_size (int|list|tuple): The pooling size of pool2d layer. If pool_size
is a list or tuple, it must contain two integers, (pool_size_H, pool_size_W).
Otherwise, the pool_size_H = pool_size_W = pool_size.
pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride
pool_stride (int|list|tuple): The pooling stride of pool2d layer. If pool_stride
is a list or tuple, it must contain two integers, (pooling_stride_H, pooling_stride_W).
Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride.
pool_padding (int|list|tuple): The padding of Pool2d layer. If pool_padding is a list or
pool_padding (int|list|tuple): The padding of pool2d layer. If pool_padding is a list or
tuple, it must contain two integers, (pool_padding_H, pool_padding_W).
Otherwise, the pool_padding_H = pool_padding_W = pool_padding. Default 0.
pool_type (str): Pooling type can be :math:`max` for max-pooling and :math:`avg` for
pool_type (str): Pooling type can be :math:`max` for max-pooling or :math:`avg` for
average-pooling. Default :math:`max`.
global_pooling (bool): Whether to use the global pooling. If global_pooling = true,
pool_size and pool_padding will be ignored. Default False
......@@ -95,13 +94,16 @@ def simple_img_conv_pool(input,
library is installed. Default: True
Return:
Variable: The result of input after Convolution2d and Pool2d.
4-D Tensor, the result of input after conv2d and pool2d, with the same data type as :attr:`input`
Return Type:
Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
img = fluid.data(name='img', shape=[100, 1, 28, 28], dtype='float32')
conv_pool = fluid.nets.simple_img_conv_pool(input=img,
filter_size=5,
num_filters=20,
......@@ -254,17 +256,23 @@ def sequence_conv_pool(input,
pool_type="max",
bias_attr=None):
"""
The sequence_conv_pool is composed with Sequence Convolution and Pooling.
**This api takes input as an LoDTensor. If the input is a Tensor, please use**
:ref:`api_fluid_nets_simple_img_conv_pool` **instead.**
The sequence_conv_pool is composed of :ref:`api_fluid_layers_sequence_conv`
and :ref:`api_fluid_layers_sequence_pool` .
Args:
input (Variable): The input of sequence_conv, which supports variable-time
length input sequence. The underlying of input is a matrix with shape
input (Variable): 2-D LoDTensor, the input of sequence_conv,
which supports variable-time length input sequence.
The underlying of input is a matrix with shape
(T, N), where T is the total time steps in this mini-batch and N is
the input_hidden_size
the input_hidden_size. The data type is float32 or float64.
num_filters(int): The number of filters.
filter_size (int): The filter size.
param_attr (ParamAttr): The parameters to the Sequence_conv Layer. Default: None.
act (str): Activation type for Sequence_conv Layer. Default: "sigmoid".
param_attr (ParamAttr): The parameters of the sequence_conv Layer. Default: None.
act (str|None): Activation type for Sequence_conv Layer.
If set to None, no activation will be applied. Default: "sigmoid".
pool_type (str): Pooling type can be :math:`max` for max-pooling, :math:`average` for
average-pooling, :math:`sum` for sum-pooling, :math:`sqrt` for sqrt-pooling.
Default :math:`max`.
......@@ -274,8 +282,12 @@ def sequence_conv_pool(input,
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
Return:
Variable: The final result after Sequence Convolution and Pooling.
Returns:
The final result after sequence_conv and sequence_pool.
It is a 2-D Tensor, with the same data type as :attr:`input`
Return Type:
Variable
Examples:
.. code-block:: python
......@@ -284,7 +296,7 @@ def sequence_conv_pool(input,
input_dim = 100 #len(word_dict)
emb_dim = 128
hid_dim = 512
data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
data = fluid.data(name="words", shape=[None, 1], dtype="int64", lod_level=1)
emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
seq_conv = fluid.nets.sequence_conv_pool(input=emb,
num_filters=hid_dim,
......