Unverified commit e705fede authored by R ruri, committed by GitHub

refine 13 en docs (#20312)

* refine 13 en doc
Parent f0dabe68
......@@ -134,7 +134,7 @@ paddle.fluid.layers.crf_decoding (ArgSpec(args=['input', 'param_attr', 'label',
paddle.fluid.layers.cos_sim (ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None), ('document', '48ec1ba2d75c4e2faf8d9a47350462ae'))
paddle.fluid.layers.cross_entropy (ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)), ('document', 'd1985a930a59c3bd41a7c1d72594f5b9'))
paddle.fluid.layers.bpr_loss (ArgSpec(args=['input', 'label', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ae57e6e5136dade436f0df1f11770afa'))
paddle.fluid.layers.square_error_cost (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'bbb9e708bab250359864fefbdf48e9d9'))
paddle.fluid.layers.square_error_cost (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', '4ed09e115b50ec7393674c4c09d223a2'))
paddle.fluid.layers.chunk_eval (ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types', 'seq_length'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'b02844e0ad4bd713c5fe6802aa13219c'))
paddle.fluid.layers.sequence_conv (ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'padding_start', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, True, None, None, None, None, None)), ('document', 'ebddcc5a1073ef065d22b4673e36b1d2'))
paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name', 'data_format'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None, 'NCHW')), ('document', 'e91c63b8ac8c35982c0ac518537e44bf'))
......@@ -170,8 +170,8 @@ paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', '
paddle.fluid.layers.dropout (ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer')), ('document', '392dd4bad607fd853f71fec71801044f'))
paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '64073050d3f172d71ace73d7bbb4168e'))
paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'input_length', 'padding_value', 'name'], varargs=None, keywords=None, defaults=(None, 0, None)), ('document', '31e0cbec2898efae95853034adadfe2b'))
paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(True, None, None, None)), ('document', '77cbfb28cd2fc589f589c7013c5086cd'))
paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', 'c1df110ea65998984f564c5c10abc54a'))
paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(True, None, None, None)), ('document', '25f0dd786a98aac31490020725604fe1'))
paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', '30eeab67154ef09ab3e884117a8d4aee'))
paddle.fluid.layers.matmul (ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', '3720b4a386585094435993deb028b592'))
paddle.fluid.layers.topk (ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e50940f3ce5a08cc477b72f517491bf3'))
paddle.fluid.layers.warpctc (ArgSpec(args=['input', 'label', 'blank', 'norm_by_times', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(0, False, None, None)), ('document', '79aaea078ddea57a82ed7906d71dedc7'))
......@@ -203,11 +203,11 @@ paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon'
paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', '6fc9bae94518bbf3e1a9e479f38f6537'))
paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '3885fd76e122ac0563fa8369bcab7363'))
paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-05, None)), ('document', '08d94daffbea3935178810bdc1633f07'))
paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1, 'NCHW')), ('document', 'd29d829607b5ff12924197a3ba296c89'))
paddle.fluid.layers.image_resize_short (ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', 'bd97ebfe4bdf5110a5fcb8ecb626a447'))
paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1, 'NCHW')), ('document', '44da7890c8a362a83a1c0902a1dc1e4d'))
paddle.fluid.layers.resize_trilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1, 'NCDHW')), ('document', '5b4d0f823f94c260fe5e6f7eec60a797'))
paddle.fluid.layers.resize_nearest (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 'NCHW')), ('document', '0107a5cbae1aef3f381d3d769a6068eb'))
paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1, 'NCHW')), ('document', 'e75264335c94b921712ec3a05d4854b1'))
paddle.fluid.layers.image_resize_short (ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', 'd810787ba7ff7f8f64b68779c64a30f8'))
paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1, 'NCHW')), ('document', '06f0e873b64bf993895fb83d1dba882a'))
paddle.fluid.layers.resize_trilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1, 'NCDHW')), ('document', 'f754e5f8b402b83b8bd83275571b2082'))
paddle.fluid.layers.resize_nearest (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 'NCHW')), ('document', '7be7d9f5a514e9224591b96562adb00f'))
paddle.fluid.layers.gather (ArgSpec(args=['input', 'index', 'overwrite'], varargs=None, keywords=None, defaults=(True,)), ('document', '5c52e8512f97a84608bc3b8b3250fc70'))
paddle.fluid.layers.gather_nd (ArgSpec(args=['input', 'index', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a7d625028525167b138106f574dffdf9'))
paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name', 'overwrite'], varargs=None, keywords=None, defaults=(None, True)), ('document', '3f94c3348dc79b7b40a839d31a3eaa84'))
......@@ -255,7 +255,7 @@ paddle.fluid.layers.elementwise_mod (ArgSpec(args=['x', 'y', 'axis', 'act', 'nam
paddle.fluid.layers.elementwise_floordiv (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '67e6101c31314d4082621e8e443cfb68'))
paddle.fluid.layers.uniform_random_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', 'cfa120e583cd4a5bfa120c8a26f98a28'))
paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', 'dd4ddb66c78a2564e5d1e0e345d8286f'))
paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '2490492db3b41af9144bb1539e4e9116'))
paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '9ac9bdc45be94494d8543b8cec5c26e0'))
paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32')), ('document', '2aed0f546f220364fb1da724a3176f74'))
paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', '42c43fc74347bfe9528850aa7f59b2b2'))
paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
......@@ -286,7 +286,7 @@ paddle.fluid.layers.bilinear_tensor_product (ArgSpec(args=['x', 'y', 'size', 'ac
paddle.fluid.layers.merge_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b2b0e5d5c155ce24bafc38b78cd0b164'))
paddle.fluid.layers.get_tensor_from_selected_rows (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2c568321feb4d16c41a83df43f95089d'))
paddle.fluid.layers.lstm (ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1)), ('document', '5193cf1113f9d8d8f682ee5a5fc8b391'))
paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '276a1213dd431228cefa33c3146df34a'))
paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '50c06087a53aee4c466afe6fca057d2b'))
paddle.fluid.layers.temporal_shift (ArgSpec(args=['x', 'seg_num', 'shift_ratio', 'name'], varargs=None, keywords=None, defaults=(0.25, None)), ('document', 'd5945431cdcae3cda21914db5bbf383e'))
paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)), ('document', '231f91231430f5dae2b757df22317c67'))
paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9bf0cc6b0717010b8ceec5dc2541d566'))
......@@ -295,7 +295,7 @@ paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label'
paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '9d93ee81f7a3e526d68bb280bc695d6c'))
paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '45f3ebbcb766fca84cb2fe6307086573'))
paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l2_reg'], varargs=None, keywords=None, defaults=(0.002,)), ('document', '3828c4bd81c25af0ab955f52d453c587'))
paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '7e5cac851fd9bad344230e1044b6a565'))
paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '577a8daeca4a1f7f85cc63fa009ce971'))
paddle.fluid.layers.fsp_matrix (ArgSpec(args=['x', 'y'], varargs=None, keywords=None, defaults=None), ('document', '3a4eb7cce366f5fd8bc38b42b6af5ba1'))
paddle.fluid.layers.continuous_value_model (ArgSpec(args=['input', 'cvm', 'use_cvm'], varargs=None, keywords=None, defaults=(True,)), ('document', 'b335b531931cc8b2d19c65980eadfc1e'))
paddle.fluid.layers.where (ArgSpec(args=['condition'], varargs=None, keywords=None, defaults=None), ('document', '68810eedf448f2cb3abd46518dd46c39'))
......@@ -306,7 +306,7 @@ paddle.fluid.layers.deformable_roi_pooling (ArgSpec(args=['input', 'rois', 'tran
paddle.fluid.layers.filter_by_instag (ArgSpec(args=['ins', 'ins_tag', 'filter_tag', 'is_lod'], varargs=None, keywords=None, defaults=None), ('document', '7703a2088af8de4128b143ff1164ca4a'))
paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards', 'shard_id', 'ignore_value'], varargs=None, keywords=None, defaults=(-1,)), ('document', '3c6b30e9cd57b38d4a5fa1ade887f779'))
paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', 'bd763b9ca99239d624c3cb4626e3627a'))
paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'd9ede6469288636e1b3233b461a165c9'))
paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', '88b967ef5132567396062d5d654b3064'))
paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '126ede8ce0e751244b1b54cd359c89d7'))
paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545'))
paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'd5b41c7b2df1b064fbd42dcf435268cd'))
......@@ -406,8 +406,8 @@ paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'alpha'], varargs=None, keywo
paddle.fluid.layers.hard_shrink (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '386a4103d2884b2f1312ebc1e8ee6486'))
paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c1f2e4c4511da09d5d89c556ea802bd1'))
paddle.fluid.layers.thresholded_relu (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', '94c71025bf11ab8172fd455350274138'))
paddle.fluid.layers.prior_box (ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '0fdf82762fd0a5acb2578a72771b5b44'))
paddle.fluid.layers.density_prior_box (ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', '7a484a0da5e993a7734867a3dfa86571'))
paddle.fluid.layers.prior_box (ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)), ('document', '7ac39852eaabb48080183b904a4afeee'))
paddle.fluid.layers.density_prior_box (ArgSpec(args=['input', 'image', 'densities', 'fixed_sizes', 'fixed_ratios', 'variance', 'clip', 'steps', 'offset', 'flatten_to_2d', 'name'], varargs=None, keywords=None, defaults=(None, None, None, [0.1, 0.1, 0.2, 0.2], False, [0.0, 0.0], 0.5, False, None)), ('document', 'e6c33478b467a6a768aee956a83cd774'))
paddle.fluid.layers.multi_box_head (ArgSpec(args=['inputs', 'image', 'base_size', 'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False)), ('document', '61360150b911fa4097f1a221f5d49877'))
paddle.fluid.layers.bipartite_match (ArgSpec(args=['dist_matrix', 'match_type', 'dist_threshold', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '6f795f407a8e3a3ec3da52726c73405a'))
paddle.fluid.layers.target_assign (ArgSpec(args=['input', 'matched_indices', 'negative_indices', 'mismatch_value', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '4670c1be208835fc8edd61025c21d0e4'))
......
......@@ -1641,67 +1641,100 @@ def prior_box(input,
name=None,
min_max_aspect_ratios_order=False):
"""
**Prior Box Operator**
Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the (min_size, max_size) interval, and the boxes are generated
in sequence according to the aspect_ratios.
Args:
input(Variable): The Input Variables, the format is NCHW.
image(Variable): The input image data of PriorBoxOp,
the layout is NCHW.
min_sizes(list|tuple|float value): min sizes of generated prior boxes.
max_sizes(list|tuple|None): max sizes of generated prior boxes.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float value): the aspect ratios of generated
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
step(list|tuple): Prior boxes step across width and height, If
step[0] == 0.0/step[1] == 0.0, the prior boxes step across
height/weight of the input will be automatically calculated.
step[0] equals 0.0 or step[1] equals 0.0, the prior boxes step across
height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
name(str): Name of the prior box op. Default: None.
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weights order of
convolution layer followed by and does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
tuple: A tuple with two Variable (boxes, variances)
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output prior boxes of PriorBox.
The layout is [H, W, num_priors, 4].
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total
box count of each position of input.
num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
The layout is [H, W, num_priors, 4].
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input
num_priors is the total
box count of each position of input
num_priors is the total box count of each position of input
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3,6,9])
images = fluid.layers.data(name="images", shape=[3,9,12])
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=images,
image=image,
min_sizes=[100.],
flip=True,
clip=True)
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
......@@ -1763,74 +1796,123 @@ def density_prior_box(input,
flatten_to_2d=False,
name=None):
"""
**Density Prior Box Operator**
Generate density prior boxes for SSD(Single Shot MultiBox Detector)
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and
the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes is equal to the number of densities.
For densities_i in densities:
N_density_prior_box =sum(N_fixed_ratios * densities_i^2),
Args:
input(Variable): The Input Variables, the format is NCHW.
image(Variable): The input image data of PriorBoxOp,
.. math::
N\_density\_prior\_box = \sum(N\_fixed\_ratios \times densities\_i^2)
N_density_prior_box is the number of density prior boxes per input position and N_fixed_ratios is the number of fixed_ratios; a short numeric check of this count follows the code example below.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
the layout is NCHW.
densities(list|tuple|None): the densities of generated density prior
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): the fixed sizes of generated density
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes, this attribute should be a list or tuple of the same
length with :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): the fixed ratios of generated density
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): the variances to be encoded in density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
clip(bool): Whether to clip out of boundary boxes. Default: False.
step(list|tuple): Prior boxes step across width and height, If
step[0] == 0.0/step[1] == 0.0, the density prior boxes step across
height/weight of the input will be automatically calculated.
step[0] equals 0.0 or step[1] equals 0.0, the density prior boxes step across
height or width of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str): Name of the density prior box op. Default: None.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
tuple: A tuple with two Variable (boxes, variances)
Tuple: A tuple with two Variable (boxes, variances)
boxes: the output density prior boxes of PriorBox.
The layout is [H, W, num_priors, 4] when flatten_to_2d is False.
The layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
The layout is [H, W, num_priors, 4] when flatten_to_2d is False.
The layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input
num_priors is the total box count of each position of input.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3,6,9])
images = fluid.layers.data(name="images", shape=[3,9,12])
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=images,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
print(box_out.shape)
# (1134, 4)
print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
print(box.shape)
# [6L, 9L, 21L, 4L]
print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
......
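A quick numeric check of the per-position box count formula in the density_prior_box docstring above, using the same densities and fixed_ratios as its example; this is an illustrative sketch, not part of the Paddle API:

    densities = [4, 2, 1]
    fixed_ratios = [1.]

    # N_density_prior_box = sum(N_fixed_ratios * d**2) over the densities
    num_priors = sum(len(fixed_ratios) * d ** 2 for d in densities)
    print(num_priors)  # 21, matching the num_priors dim in [6, 9, 21, 4] above
    # with flatten_to_2d=True the boxes collapse to [6 * 9 * 21, 4] = [1134, 4]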
......@@ -1951,39 +1951,59 @@ def bpr_loss(input, label, name=None):
def square_error_cost(input, label):
"""
**Square error cost layer**
This layer accepts input predictions and target label and returns the
This op accepts input predictions and target label and returns the
squared error cost.
For predictions, :math:`X`, and target labels, :math:`Y`, the equation is:
For the input predictions and the target label, the equation is:
.. math::
Out = (X - Y)^2
In the above equation:
* :math:`X`: Input predictions, a tensor.
* :math:`Y`: Input labels, a tensor.
* :math:`Out`: Output value, same shape with :math:`X`.
Out = (input - label)^2
Args:
input (Variable): Input tensor, has predictions.
label (Variable): Label tensor, has target labels.
Parameters:
input (Variable): Input tensor, the data type should be float32.
label (Variable): Label tensor, the data type should be float32.
Returns:
Variable: The tensor variable storing the element-wise squared error \
difference of input and label.
The tensor variable storing the element-wise squared error \
difference between input and label.
Return type: Variable.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.data(name='y_predict', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
import numpy as np
input = fluid.data(name="input", shape=[1])
label = fluid.data(name="label", shape=[1])
output = fluid.layers.square_error_cost(input,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data, "label":label_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([0.04000002], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
label = dg.to_variable(label_data)
output = fluid.layers.square_error_cost(input, label)
print(output.numpy())
# [0.04000002]
"""
helper = LayerHelper('square_error_cost', **locals())
minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
......@@ -6455,9 +6475,7 @@ def split(input, num_or_sections, dim=-1, name=None):
def l2_normalize(x, axis, epsilon=1e-12, name=None):
"""
**L2 normalize Layer**
The l2 normalize layer normalizes `x` along dimension `axis` using an L2
This op normalizes `x` along dimension `axis` using an L2
norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
.. math::
......@@ -6468,27 +6486,57 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
slice along dimension `axis`.
Args:
x(Variable|list): The input tensor to l2_normalize layer.
x(Variable|list): The input could be an N-D tensor, and the input data type could be float32 or float64.
axis(int): The axis on which to apply normalization. If `axis < 0`, \
the dimension to normalization is rank(X) + axis. -1 is the
last dimension.
epsilon(float): The epsilon value is used to avoid division by zero, \
the default value is 1e-12.
name(str|None): A name for this layer(optional). If set None, the layer \
will be named automatically.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The output tensor variable is the same shape with `x`.
Variable: The output has the same shape and data type as `x`.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
data = fluid.layers.data(name="data",
shape=(3, 17, 13),
dtype="float32")
normed = fluid.layers.l2_normalize(x=data, axis=1)
import numpy as np
input = fluid.data(name="input", shape=[2,3])
output = fluid.layers.l2_normalize(x=input,axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3).astype("float32")
print(input_data)
# [[0.5171216 0.12704141 0.56018186]
# [0.93251234 0.5382788 0.81709313]]
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([[0.48496857, 0.22970329, 0.56545246],
# [0.8745316 , 0.9732607 , 0.82478094]], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.l2_normalize(x=input, axis=-1)
print(output.numpy())
# [[0.66907585 0.16437206 0.7247892 ]
# [0.6899054 0.3982376 0.6045142 ]]
"""
if len(x.shape) == 1:
......@@ -6710,7 +6758,7 @@ def edit_distance(input,
input_length=None,
label_length=None):
"""
Edit distance operator computes the edit distances between a batch of
This op computes the edit distances between a batch of
hypothesis strings and their references. Edit distance, also called
Levenshtein distance, measures how dissimilar two strings are by counting
the minimum number of operations to transform one string into another.
......@@ -6731,9 +6779,9 @@ def edit_distance(input,
distance for a pair of strings respectively. If Attr(normalized) is true,
the edit distance will be divided by the length of reference string.
Args:
input(Variable): The indices for hypothesis strings, it should have rank 2 and dtype int64.
label(Variable): The indices for reference strings, it should have rank 2 and dtype int64.
Parameters:
input(Variable): The indices for hypothesis strings, its rank should be 2 and its data type should be int64.
label(Variable): The indices for reference strings, its rank should be 2 and its data type should be int64.
normalized(bool, default True): Indicates whether to normalize the edit distance by
the length of reference string.
ignored_tokens(list<int>, default None): Tokens that should be removed before
......@@ -6742,27 +6790,30 @@ def edit_distance(input,
label_length(Variable): The length for each sequence in `label` if it's of Tensor type, it should have shape `[batch_size]` and dtype int64.
Returns:
edit_distance_out(Variable): edit distance result in shape [batch_size, 1]. \n
Tuple:
edit_distance_out(Variable): edit distance result in shape [batch_size, 1].
sequence_num(Variable): sequence number in shape [].
Examples:
.. code-block:: python
import paddle.fluid as fluid
# using LoDTensor
x_lod = fluid.layers.data(name='x_lod', shape=[1], dtype='int64', lod_level=1)
y_lod = fluid.layers.data(name='y_lod', shape=[1], dtype='int64', lod_level=1)
x_lod = fluid.data(name='x_lod', shape=[None,1], dtype='int64', lod_level=1)
y_lod = fluid.data(name='y_lod', shape=[None,1], dtype='int64', lod_level=1)
distance_lod, seq_num_lod = fluid.layers.edit_distance(input=x_lod, label=y_lod)
# using Tensor
x_seq_len = 5
y_seq_len = 6
x_pad = fluid.layers.data(name='x_pad', shape=[x_seq_len], dtype='int64')
y_pad = fluid.layers.data(name='y_pad', shape=[y_seq_len], dtype='int64')
x_len = fluid.layers.data(name='x_len', shape=[], dtype='int64')
y_len = fluid.layers.data(name='y_len', shape=[], dtype='int64')
x_pad = fluid.data(name='x_pad', shape=[None,x_seq_len], dtype='int64')
y_pad = fluid.data(name='y_pad', shape=[None,y_seq_len], dtype='int64')
x_len = fluid.data(name='x_len', shape=[None], dtype='int64')
y_len = fluid.data(name='y_len', shape=[None], dtype='int64')
distance_pad, seq_num_pad = fluid.layers.edit_distance(input=x_pad, label=y_pad, input_length=x_len, label_length=y_len)
"""
......@@ -9273,7 +9324,7 @@ def image_resize(input,
align_mode=1,
data_format='NCHW'):
"""
**Resize a Batch of Images**
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
......@@ -9396,7 +9447,7 @@ def image_resize(input,
Args:
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of image resize
......@@ -9460,32 +9511,63 @@ def image_resize(input,
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3, 6, 9], dtype="float32")
# input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime.
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.image_resize(input=input,out_shape=[12,12])
out0 = fluid.layers.image_resize(input, out_shape=[12, 12], resample="NEAREST")
# out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime.
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])
# out_shape is a list in which each element is a integer or a tensor Variable
dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False)
out1 = fluid.layers.image_resize(input, out_shape=[12, dim1], resample="NEAREST")
# out1.shape = [-1, 3, 12, -1]
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.image_resize(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# out_shape is a 1-D tensor Variable
shape_tensor = fluid.layers.data(name="shape_tensor", shape=[2], dtype="int32", append_batch_size=False)
out2 = fluid.layers.image_resize(input, out_shape=shape_tensor, resample="NEAREST")
# out2.shape = [-1, 3, -1, -1]
input_data = np.random.rand(2,3,6,10).astype("float32")
# when use actual_shape
actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False)
out3 = fluid.layers.image_resize(input, out_shape=[4, 4], resample="NEAREST", actual_shape=actual_shape_tensor)
# out3.shape = [-1, 3, 4, 4]
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
# scale is a Variable
scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False)
out4 = fluid.layers.image_resize(input, scale=scale_tensor)
# out4.shape = [-1, 3, -1, -1]
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.image_resize(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
......@@ -9644,7 +9726,7 @@ def resize_bilinear(input,
align_mode=1,
data_format='NCHW'):
"""
Resize input by performing bilinear interpolation based on given
This op resizes the input by performing bilinear interpolation based on given
output shape which is specified by actual_shape, out_shape and scale
in priority order.
......@@ -9695,8 +9777,8 @@ def resize_bilinear(input,
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Args:
input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
Parameters:
input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize bilinear
layer, the shape is (out_h, out_w).Default: None. If a list, each
......@@ -9706,7 +9788,6 @@ def resize_bilinear(input,
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): The output variable name.
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
......@@ -9724,40 +9805,72 @@ def resize_bilinear(input,
align_mode(bool): ${align_mode_comment}
data_format(str, optional): NCHW(num_batches, channels, height, width) or
NHWC(num_batches, height, width, channels). Default: 'NCHW'.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
A 4-D Tensor in shape of (num_batches, channels, out_h, out_w) or
(num_batches, out_h, out_w, channels).
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3, 6, 9], dtype="float32")
# input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime.
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])
out0 = fluid.layers.resize_bilinear(input, out_shape=[12, 12])
# out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime.
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)
# out_shape is a list in which each element is a integer or a tensor Variable
dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False)
out1 = fluid.layers.resize_bilinear(input, out_shape=[12, dim1])
# out1.shape = [-1, 3, 12, -1]
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)
# out_shape is a 1-D tensor Variable
shape_tensor = fluid.layers.data(name="shape_tensor", shape=[2], dtype="int32", append_batch_size=False)
out2 = fluid.layers.resize_bilinear(input, out_shape=shape_tensor)
# out2.shape = [-1, 3, -1, -1]
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# when use actual_shape
actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False)
out3 = fluid.layers.resize_bilinear(input, out_shape=[4, 4], actual_shape=actual_shape_tensor)
# out3.shape = [-1, 3, 4, 4]
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
# scale is a Variable
scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False)
out4 = fluid.layers.resize_bilinear(input, scale=scale_tensor)
# out4.shape = [-1, 3, -1, -1]
"""
return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
......@@ -9774,7 +9887,7 @@ def resize_trilinear(input,
align_mode=1,
data_format='NCDHW'):
"""
Resize input by performing trilinear interpolation based on given
This op resizes the input by performing trilinear interpolation based on given
output shape which is specified by actual_shape, out_shape and scale
in priority order.
......@@ -9828,18 +9941,15 @@ def resize_trilinear(input,
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Args:
Parameters:
input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize bilinear
layer, the shape is (out_d, out_h, out_w). Default: None. If a list,
each element can be an integer or a Tensor Variable with shape: [1]. If
a Tensor Variable, its dimension size should be 1.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input depth, height or width.
At least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): The output variable name.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
......@@ -9860,38 +9970,71 @@ def resize_trilinear(input,
Default: 'NCDHW'.
Returns:
A 5-D Tensor in shape of (num_batches, channels, out_d, out_h, out_w) or
(num_batches, out_d, out_h, out_w, channels).
Variable: A 5-D Tensor(NCDHW or NDHWC)
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3, 6, 9, 11], dtype="float32")
# input.shape = [-1, 3, 6, 9, 11], where -1 indicates batch size, and it will get the exact value in runtime.
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,8,10])
out0 = fluid.layers.resize_trilinear(input, out_shape=[12, 12, 12])
# out0.shape = [-1, 3, 12, 12, 12], it means out0.shape[0] = input.shape[0] in runtime.
#1
output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])
# out_shape is a list in which each element is a integer or a tensor Variable
dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False)
out1 = fluid.layers.resize_trilinear(input, out_shape=[12, dim1, 4])
# out1.shape = [-1, 3, 12, -1, 4]
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])
#3
#x = np.array([3,12,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,8,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12, 12)
#2
# (2, 3, 12, 2, 4)
#3
# (2, 3, 3, 12, 12)
#4
# (2, 3, 3, 4, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
print(output.shape)
# [2L, 3L, 12L, 12L, 12L]
# out_shape is a 1-D tensor Variable
shape_tensor = fluid.layers.data(name="shape_tensor", shape=[3], dtype="int32", append_batch_size=False)
out2 = fluid.layers.resize_trilinear(input, out_shape=shape_tensor)
# out2.shape = [-1, 3, -1, -1, -1]
# when use actual_shape
actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[3], dtype="int32", append_batch_size=False)
out3 = fluid.layers.resize_trilinear(input, out_shape=[4, 4, 8], actual_shape=actual_shape_tensor)
# out3.shape = [-1, 3, 4, 4, 8]
# scale is a Variable
scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False)
out4 = fluid.layers.resize_trilinear(input, scale=scale_tensor)
# out4.shape = [-1, 3, -1, -1, -1]
"""
return image_resize(input, out_shape, scale, name, 'TRILINEAR',
......@@ -9907,7 +10050,7 @@ def resize_nearest(input,
align_corners=True,
data_format='NCHW'):
"""
Resize input by performing nearest neighbor interpolation in both the
This op resizes the input by performing nearest neighbor interpolation in both the
height direction and the width direction based on given output shape
which is specified by actual_shape, out_shape and scale in priority order.
......@@ -9951,18 +10094,15 @@ def resize_nearest(input,
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
Args:
Parameters:
input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of resize nearest
layer, the shape is (out_h, out_w). Default: None. If a list, each
element can be integer or a tensor Variable with shape: [1]. If a
tensor Variable, its dimension size should be 1.
out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): The output variable name.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
......@@ -9982,38 +10122,71 @@ def resize_nearest(input,
Default: 'NCHW'.
Returns:
A 4-D Tensor in shape of (num_batches, channels, out_h, out_w) or
(num_batches, out_h, out_w, channels).
Variable: 4-D tensor(NCHW or NHWC).
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3, 6, 9], dtype="float32")
# input.shape = [-1, 3, 6, 9], where -1 indicates batch size, and it will get the exact value in runtime.
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.resize_nearest(input=input,scale=scale_tensor)
out0 = fluid.layers.resize_nearest(input, out_shape=[12, 12])
# out0.shape = [-1, 3, 12, 12], it means out0.shape[0] = input.shape[0] in runtime.
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
print(output.shape)
# out_shape is a list in which each element is a integer or a tensor Variable
dim1 = fluid.layers.data(name="dim1", shape=[1], dtype="int32", append_batch_size=False)
out1 = fluid.layers.resize_nearest(input, out_shape=[12, dim1])
# out1.shape = [-1, 3, 12, -1]
# [2L, 3L, 12L, 12L]
# out_shape is a 1-D tensor Variable
shape_tensor = fluid.layers.data(name="resize_shape", shape=[2], dtype="int32", append_batch_size=False)
out2 = fluid.layers.resize_nearest(input, out_shape=shape_tensor)
# out2.shape = [-1, 3, -1, -1]
# when use actual_shape
actual_shape_tensor = fluid.layers.data(name="actual_shape_tensor", shape=[2], dtype="int32", append_batch_size=False)
out3 = fluid.layers.resize_nearest(input, out_shape=[4, 4], actual_shape=actual_shape_tensor)
# out3.shape = [-1, 3, 4, 4]
# scale is a Variable
scale_tensor = fluid.layers.data(name="scale", shape=[1], dtype="float32", append_batch_size=False)
out4 = fluid.layers.resize_nearest(input, scale=scale_tensor)
# out4.shape = [-1, 3, -1, -1]
"""
return image_resize(
......@@ -10030,27 +10203,24 @@ def resize_nearest(input,
def image_resize_short(input, out_short_len, resample='BILINEAR'):
"""
Resize a batch of images. The short edge of input images will be
This op resizes a batch of images. The short edge of input images will be
resized to the given 'out_short_len'. The long edge of input images
will be resized proportionately to make images' length-width ratio
constant.
Args:
input (Variable): The input tensor of image resize layer,
This is a 4-D tensor of the shape
(num_batches, channels, in_h, in_w).
Parameters:
input (Variable): 4-D tensor (NCHW), the input tensor of the image resize layer.
out_short_len(int): The length of output images' short edge.
resample (str): resample method, default: BILINEAR.
Returns:
Variable: The output is a 4-D tensor of the shape
(num_batches, channls, out_h, out_w).
Variable: 4-D tensor(NCHW).
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[3,6,9], dtype="float32")
input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
out = fluid.layers.image_resize_short(input, out_short_len=3)
"""
in_shape = input.shape
......@@ -12446,27 +12616,26 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
"""
${comment}
This op is used to sample an id from the multinomial distribution given by the input, sampling one id for each sample.
Args:
x (Variable): ${x_comment}
min (Float): ${min_comment}
max (Float): ${max_comment}
seed (Float): ${seed_comment}
Parameters:
x (Variable): 2-D tensor, [batch_size, input_feature_dimensions]
min (Float): minimum, default 0.0.
max (Float): maximum, default 1.0.
seed (Float): Random seed, default 0. If seed is not 0, the op will generate the same numbers every time.
dtype(np.dtype|core.VarDesc.VarType|str): The type of output data: float32, float16, int, etc.
Returns:
out (Variable): ${out_comment}
Variable: The tensor of sampled ids.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(
x = fluid.data(
name="X",
shape=[13, 11],
dtype='float32',
append_batch_size=False)
dtype='float32')
out = fluid.layers.sampling_id(x)
"""
......@@ -15033,8 +15202,6 @@ def get_tensor_from_selected_rows(x, name=None):
def shuffle_channel(x, group, name=None):
"""
**Shuffle Channel Operator**
This operator shuffles the channels of input x.
It divides the input channels in each group into :attr:`group` subgroups,
and obtains a new order by selecting an element from every subgroup one by one.
......@@ -15087,7 +15254,7 @@ def shuffle_channel(x, group, name=None):
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(name='input', shape=[4,2,2], dtype='float32')
input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
out = fluid.layers.shuffle_channel(x=input, group=2)
"""
helper = LayerHelper("shuffle_channel", **locals())
......@@ -15660,9 +15827,7 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002):
def pixel_shuffle(x, upscale_factor):
"""
**Pixel Shuffle Layer**
This layer rearranges elements in a tensor of shape [N, C, H, W]
This op rearranges elements in a tensor of shape [N, C, H, W]
to a tensor of shape [N, C/r**2, H*r, W*r].
This is useful for implementing efficient sub-pixel convolution
with a stride of 1/r.
......@@ -15670,35 +15835,37 @@ def pixel_shuffle(x, upscale_factor):
Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
by Shi et al. (2016) for more details.
.. code-block:: text
Given a 4-D tensor with the shape:
x.shape = [1, 9, 4, 4]
Given upscale_factor:
upscale_factor= 3
output shape is:
[1, 1, 12, 12]
Args:
Parameters:
x(Variable): The input tensor variable.
upscale_factor(int): factor to increase spatial resolution
x(Variable): 4-D tensor, the data type should be float32 or float64.
upscale_factor(int): factor to increase spatial resolution.
Returns:
Out(Variable): Reshaped tensor according to the new dimension.
Raises:
ValueError: If the square of upscale_factor cannot divide the channels of input.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[9,4,4])
import numpy as np
input = fluid.data(name="input", shape=[2,9,4,4])
output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,9,4,4).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
# print(output.shape)
# (2L, 1L, 12L, 12L)
"""
......@@ -16637,36 +16804,55 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
def mse_loss(input, label):
"""
**Mean square error layer**
This layer accepts input predications and target label and returns the mean square error.
This op accepts input predictions and target labels and returns the mean square error.
The loss can be described as:
.. math::
Out = mean((X - Y)^2)
In the above equation:
* :math:`X`: Input predications, a tensor.
* :math:`Y`: Input labels, a tensor.
* :math:`Out`: Output value, same shape with :math:`X`.
Out = MEAN((input - label)^2)
Args:
input (Variable): Input tensor, has predictions.
label (Variable): Label tensor, has target labels.
Parameters:
input (Variable): Input tensor, the data type should be float32.
label (Variable): Label tensor, the data type should be float32.
Returns:
Variable: The tensor variable storing the mean square error difference of input and label.
Return type: Variable.
Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.data(name='y_predict', shape=[1], dtype='float32')
mse = fluid.layers.mse_loss(input=y_predict, label=y)
import numpy as np
input = fluid.data(name="input", shape=[1])
label = fluid.data(name="label", shape=[1])
output = fluid.layers.mse_loss(input,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data, "label":label_data},
fetch_list=[output],
return_numpy=True)
print(output_data)
# [array([0.04000002], dtype=float32)]
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
label = dg.to_variable(label_data)
output = fluid.layers.mse_loss(input, label)
print(output.numpy())
# [0.04000002]
"""
return reduce_mean(square_error_cost(input, label))
......
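A small NumPy check of the relationship in the return statement above, where mse_loss is the mean of the element-wise square_error_cost:

    import numpy as np

    # mse_loss reduces the element-wise squared error by its mean
    pred = np.array([1.5, 2.0], dtype="float32")
    label = np.array([1.7, 1.0], dtype="float32")
    square_err = (pred - label) ** 2     # element-wise, like square_error_cost
    print(square_err)                    # approximately [0.04, 1.0]
    print(square_err.mean())             # approximately 0.52, the value mse_loss would return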