Commit 8fb0635c authored by LielinJiang, committed by wangguanzhong

[cherry-pick] Polish English APIs' doc for 1.6 (#20374)

* Polish English APIs' doc (#20198)

* refine Normal and Uniform document

* refine eng doc, test=release/1.6, test=document_fix
Parent 17f9bff0
@@ -279,7 +279,7 @@ paddle.fluid.layers.maxout (ArgSpec(args=['x', 'groups', 'name'], varargs=None,
 paddle.fluid.layers.space_to_depth (ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '26decdea9376b6b9a0d3432d82ca207b'))
 paddle.fluid.layers.affine_grid (ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f85b263b7b6698d000977529a28f202b'))
 paddle.fluid.layers.sequence_reverse (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5b32ed21ab89140a8e758002923a0da3'))
-paddle.fluid.layers.affine_channel (ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name', 'act'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None, None)), ('document', '9f303c67538e468a36c5904a0a3aa110'))
+paddle.fluid.layers.affine_channel (ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name', 'act'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None, None)), ('document', 'ecc4b1323028bde0518d666882d03515'))
 paddle.fluid.layers.similarity_focus (ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '18ec2e3afeb90e70c8b73d2b71c40fdb'))
 paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'a0b73c21be618cec0281e7903039e5e3'))
 paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5d16663e096d7f04954c70ce1cc5e195'))
@@ -424,7 +424,7 @@ paddle.fluid.layers.roi_perspective_transform (ArgSpec(args=['input', 'rois', 't
 paddle.fluid.layers.generate_proposal_labels (ArgSpec(args=['rpn_rois', 'gt_classes', 'is_crowd', 'gt_boxes', 'im_info', 'batch_size_per_im', 'fg_fraction', 'fg_thresh', 'bg_thresh_hi', 'bg_thresh_lo', 'bbox_reg_weights', 'class_nums', 'use_random', 'is_cls_agnostic', 'is_cascade_rcnn'], varargs=None, keywords=None, defaults=(256, 0.25, 0.25, 0.5, 0.0, [0.1, 0.1, 0.2, 0.2], None, True, False, False)), ('document', '69def376b42ef0681d0cc7f53a2dac4b'))
 paddle.fluid.layers.generate_proposals (ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None)), ('document', 'b7d707822b6af2a586bce608040235b1'))
 paddle.fluid.layers.generate_mask_labels (ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None), ('document', 'b319b10ddaf17fb4ddf03518685a17ef'))
-paddle.fluid.layers.iou_similarity (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '72fca4a39ccf82d5c746ae62d1868a99'))
+paddle.fluid.layers.iou_similarity (ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e24478fd1fcf1727d4947fe14356b3d4'))
 paddle.fluid.layers.box_coder (ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)), ('document', '511d7033c0cfce1a5b88c04ad6e7ed5b'))
 paddle.fluid.layers.polygon_box_transform (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e308ce1661cb722b220a6f482f85b9e4'))
 paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gt_box', 'gt_label', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gt_score', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', 'df35e6510e8db0844320ec77dc8b7dc4'))
@@ -446,18 +446,18 @@ paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], vara
 paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'fd57228fb76195e66bbcc8d8e42c494d'))
 paddle.fluid.layers.cosine_decay (ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', '1062e487dd3b50a6e58b5703b4f594c9'))
 paddle.fluid.layers.linear_lr_warmup (ArgSpec(args=['learning_rate', 'warmup_steps', 'start_lr', 'end_lr'], varargs=None, keywords=None, defaults=None), ('document', 'dc7292c456847ba41cfd318e9f7f4363'))
-paddle.fluid.layers.Uniform ('paddle.fluid.layers.distributions.Uniform', ('document', 'af70e7003f437e7a8a9e28cded35c433'))
+paddle.fluid.layers.Uniform ('paddle.fluid.layers.distributions.Uniform', ('document', '9b1a9ebdd8ae18bf562486611ed74e59'))
 paddle.fluid.layers.Uniform.__init__ (ArgSpec(args=['self', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.layers.Uniform.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ba59f9ce77af3c93e2b4c8af1801a24e'))
+paddle.fluid.layers.Uniform.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'cde9f1980a2be7939798b32ec8cd59e1'))
 paddle.fluid.layers.Uniform.kl_divergence (ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None), ('document', '3baee52abbed82d47e9588d9dfe2f42f'))
-paddle.fluid.layers.Uniform.log_prob (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', 'b79091014ceaffb6a7372a198a341c23'))
-paddle.fluid.layers.Uniform.sample (ArgSpec(args=['self', 'shape', 'seed'], varargs=None, keywords=None, defaults=(0,)), ('document', 'adac334af13f6984e991b3ecf12b8cb7'))
-paddle.fluid.layers.Normal ('paddle.fluid.layers.distributions.Normal', ('document', '3265262d0d8b3b32c6245979a5cdced9'))
+paddle.fluid.layers.Uniform.log_prob (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', 'ad4ed169f86c00923621504c782010b0'))
+paddle.fluid.layers.Uniform.sample (ArgSpec(args=['self', 'shape', 'seed'], varargs=None, keywords=None, defaults=(0,)), ('document', '9002ab4a80769211565b64298a770db5'))
+paddle.fluid.layers.Normal ('paddle.fluid.layers.distributions.Normal', ('document', '948f3a95ca14c952401e6a2ec30a35f9'))
 paddle.fluid.layers.Normal.__init__ (ArgSpec(args=['self', 'loc', 'scale'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.layers.Normal.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd2db47b1e62c037a2570fc526b93f518'))
-paddle.fluid.layers.Normal.kl_divergence (ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None), ('document', '2e8845cdf1129647e6fa6e816876cd3b'))
-paddle.fluid.layers.Normal.log_prob (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', 'b79091014ceaffb6a7372a198a341c23'))
-paddle.fluid.layers.Normal.sample (ArgSpec(args=['self', 'shape', 'seed'], varargs=None, keywords=None, defaults=(0,)), ('document', 'adac334af13f6984e991b3ecf12b8cb7'))
+paddle.fluid.layers.Normal.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '254ff8081a9df3cb96db045411dfbcbd'))
+paddle.fluid.layers.Normal.kl_divergence (ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None), ('document', '9fc9bd26e5211e2c6ad703a7fba08e65'))
+paddle.fluid.layers.Normal.log_prob (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', 'ad4ed169f86c00923621504c782010b0'))
+paddle.fluid.layers.Normal.sample (ArgSpec(args=['self', 'shape', 'seed'], varargs=None, keywords=None, defaults=(0,)), ('document', '9002ab4a80769211565b64298a770db5'))
 paddle.fluid.layers.Categorical ('paddle.fluid.layers.distributions.Categorical', ('document', '865c9dac8af6190e05588486ba091ee8'))
 paddle.fluid.layers.Categorical.__init__ (ArgSpec(args=['self', 'logits'], varargs=None, keywords=None, defaults=None), ('document', '933b96c9ebab8e2c1f6007a50287311e'))
 paddle.fluid.layers.Categorical.entropy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'b360a2a7a4da07c2d268b329e09c82c1'))
...
@@ -591,20 +591,36 @@ def iou_similarity(x, y, name=None):
     ${comment}
     Args:
-        x(${x_type}): ${x_comment}
+        x (Variable): ${x_comment}.The data type is float32 or float64.
-        y(${y_type}): ${y_comment}
+        y (Variable): ${y_comment}.The data type is float32 or float64.
     Returns:
-        out(${out_type}): ${out_comment}
+        Variable: ${out_comment}.The data type is same with x.
     Examples:
         .. code-block:: python
+            import numpy as np
             import paddle.fluid as fluid
-            x = fluid.layers.data(name='x', shape=[4], dtype='float32')
-            y = fluid.layers.data(name='y', shape=[4], dtype='float32')
+            use_gpu = False
+            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
+            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
             iou = fluid.layers.iou_similarity(x=x, y=y)
+            exe.run(fluid.default_startup_program())
+            test_program = fluid.default_main_program().clone(for_test=True)
+            [out_iou] = exe.run(test_program,
+                    fetch_list=iou,
+                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
+                                         [0., 0., 1.0, 1.0]]).astype('float32'),
+                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
+            # out_iou is [[0.2857143],
+            #             [0.       ]] with shape: [2, 1]
     """
     helper = LayerHelper("iou_similarity", **locals())
     if name is None:
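Note: the IoU values quoted in the new iou_similarity example can be checked by hand. A minimal, standalone sketch (plain Python, not part of the patch) that reproduces them from the boxes fed in the example:

    def iou(box_a, box_b):
        # boxes are [xmin, ymin, xmax, ymax]
        ix_min = max(box_a[0], box_b[0])
        iy_min = max(box_a[1], box_b[1])
        ix_max = min(box_a[2], box_b[2])
        iy_max = min(box_a[3], box_b[3])
        inter = max(ix_max - ix_min, 0.) * max(iy_max - iy_min, 0.)
        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
        return inter / (area_a + area_b - inter)

    print(iou([0.5, 0.5, 2.0, 2.0], [1.0, 1.0, 2.5, 2.5]))  # 0.2857143 (= 1.0 / 3.5)
    print(iou([0., 0., 1.0, 1.0], [1.0, 1.0, 2.5, 2.5]))    # 0.0 (boxes only touch at a corner)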
...
@@ -135,12 +135,13 @@ class Uniform(Distribution):
     broadcasting (e.g., `high - low` is a valid operation).
     Args:
-        low(float|list|numpy.ndarray|Variable): The lower boundary of uniform distribution.
-        high(float|list|numpy.ndarray|Variable): The higher boundary of uniform distribution.
+        low(float|list|numpy.ndarray|Variable): The lower boundary of uniform distribution.The data type is float32
+        high(float|list|numpy.ndarray|Variable): The higher boundary of uniform distribution.The data type is float32
     Examples:
         .. code-block:: python
+            import numpy as np
             from paddle.fluid import layers
             from paddle.fluid.layers import Uniform
@@ -158,19 +159,19 @@ class Uniform(Distribution):
             # With broadcasting:
             u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])
-            # Variable as input
-            dims = 3
-            low = layers.data(name='low', shape=[dims], dtype='float32')
-            high = layers.data(name='high', shape=[dims], dtype='float32')
-            values = layers.data(name='values', shape=[dims], dtype='float32')
+            # Complete example
+            value_npdata = np.array([0.8], dtype="float32")
+            value_tensor = layers.create_tensor(dtype="float32")
+            layers.assign(value_npdata, value_tensor)
-            uniform = Uniform(low, high)
+            uniform = Uniform([0.], [2.])
-            sample = uniform.sample([2, 3])
+            sample = uniform.sample([2])
+            # a random tensor created by uniform distribution with shape: [2, 1]
             entropy = uniform.entropy()
-            lp = uniform.log_prob(values)
+            # [0.6931472] with shape: [1]
+            lp = uniform.log_prob(value_tensor)
+            # [-0.6931472] with shape: [1]
     """
     def __init__(self, low, high):
@@ -193,7 +194,7 @@ class Uniform(Distribution):
             seed (int): Python integer number.
         Returns:
-            Variable: A tensor with prepended dimensions shape.
+            Variable: A tensor with prepended dimensions shape.The data type is float32.
         """
         batch_shape = list((self.low + self.high).shape)
@@ -224,7 +225,7 @@ class Uniform(Distribution):
             value (Variable): The input tensor.
         Returns:
-            Variable: log probability.
+            Variable: log probability.The data type is same with value.
         """
         lb_bool = control_flow.less_than(self.low, value)
@@ -237,7 +238,7 @@ class Uniform(Distribution):
         """Shannon entropy in nats.
         Returns:
-            Variable: Shannon entropy of uniform distribution.
+            Variable: Shannon entropy of uniform distribution.The data type is float32.
         """
         return nn.log(self.high - self.low)
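Note: the constants quoted in the updated Uniform example follow from the closed forms visible above (entropy returns log(high - low), and a uniform density is 1/(high - low) inside its support). A minimal sanity check (plain Python, not part of the patch), assuming Uniform(low=0., high=2.) and value 0.8 as in the example:

    import math

    low, high, value = 0.0, 2.0, 0.8
    entropy = math.log(high - low)     # ln(high - low), as in entropy() above
    log_prob = -math.log(high - low)   # log of 1/(high - low), since low <= value < high
    print(entropy)   # 0.6931472
    print(log_prob)  # -0.6931472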
@@ -265,8 +266,8 @@ class Normal(Distribution):
     * :math:`Z`: is the normalization constant.
     Args:
-        loc(float|list|numpy.ndarray|Variable): The mean of normal distribution.
-        scale(float|list|numpy.ndarray|Variable): The std of normal distribution.
+        loc(float|list|numpy.ndarray|Variable): The mean of normal distribution.The data type is float32.
+        scale(float|list|numpy.ndarray|Variable): The std of normal distribution.The data type is float32.
     Examples:
         .. code-block:: python
@@ -278,36 +279,34 @@ class Normal(Distribution):
             dist = Normal(loc=0., scale=3.)
             # Define a batch of two scalar valued Normals.
             # The first has mean 1 and standard deviation 11, the second 2 and 22.
-            dist = Normal(loc=[1, 2.], scale=[11, 22.])
+            dist = Normal(loc=[1., 2.], scale=[11., 22.])
             # Get 3 samples, returning a 3 x 2 tensor.
             dist.sample([3])
             # Define a batch of two scalar valued Normals.
             # Both have mean 1, but different standard deviations.
-            dist = Normal(loc=1., scale=[11, 22.])
+            dist = Normal(loc=1., scale=[11., 22.])
             # Define a batch of two scalar valued Normals.
             # Both have mean 1, but different standard deviations.
-            dist = Normal(loc=1., scale=[11, 22.])
+            dist = Normal(loc=1., scale=[11., 22.])
-            # Variable as input
-            dims = 3
-            loc = layers.data(name='loc', shape=[dims], dtype='float32')
-            scale = layers.data(name='scale', shape=[dims], dtype='float32')
-            other_loc = layers.data(
-                name='other_loc', shape=[dims], dtype='float32')
-            other_scale = layers.data(
-                name='other_scale', shape=[dims], dtype='float32')
-            values = layers.data(name='values', shape=[dims], dtype='float32')
-            normal = Normal(loc, scale)
-            other_normal = Normal(other_loc, other_scale)
-            sample = normal.sample([2, 3])
-            entropy = normal.entropy()
-            lp = normal.log_prob(values)
-            kl = normal.kl_divergence(other_normal)
+            # Complete example
+            value_npdata = np.array([0.8], dtype="float32")
+            value_tensor = layers.create_tensor(dtype="float32")
+            layers.assign(value_npdata, value_tensor)
+            normal_a = Normal([0.], [1.])
+            normal_b = Normal([0.5], [2.])
+            sample = normal_a.sample([2])
+            # a random tensor created by normal distribution with shape: [2, 1]
+            entropy = normal_a.entropy()
+            # [1.4189385] with shape: [1]
+            lp = normal_a.log_prob(value_tensor)
+            # [-1.2389386] with shape: [1]
+            kl = normal_a.kl_divergence(normal_b)
+            # [0.34939718] with shape: [1]
     """
     def __init__(self, loc, scale):
@@ -330,7 +329,7 @@ class Normal(Distribution):
             seed (int): Python integer number.
         Returns:
-            Variable: A tensor with prepended dimensions shape.
+            Variable: A tensor with prepended dimensions shape.The data type is float32.
         """
         batch_shape = list((self.loc + self.scale).shape)
@@ -356,7 +355,7 @@ class Normal(Distribution):
         """Shannon entropy in nats.
         Returns:
-            Variable: Shannon entropy of normal distribution.
+            Variable: Shannon entropy of normal distribution.The data type is float32.
         """
         batch_shape = list((self.loc + self.scale).shape)
@@ -372,7 +371,7 @@ class Normal(Distribution):
             value (Variable): The input tensor.
         Returns:
-            Variable: log probability.
+            Variable: log probability.The data type is same with value.
         """
         var = self.scale * self.scale
@@ -387,7 +386,7 @@ class Normal(Distribution):
             other (Normal): instance of Normal.
         Returns:
-            Variable: kl-divergence between two normal distributions.
+            Variable: kl-divergence between two normal distributions.The data type is float32.
         """
         assert isinstance(other, Normal), "another distribution must be Normal"
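Note: the constants quoted in the updated Normal example match the standard Gaussian closed forms for entropy, log density, and KL divergence. A minimal sanity check (plain Python, not part of the patch), assuming normal_a = Normal([0.], [1.]), normal_b = Normal([0.5], [2.]) and value 0.8 as in the example:

    import math

    mu_a, sigma_a = 0.0, 1.0   # normal_a
    mu_b, sigma_b = 0.5, 2.0   # normal_b
    value = 0.8

    entropy = 0.5 * math.log(2.0 * math.pi * math.e * sigma_a ** 2)
    log_prob = (-0.5 * math.log(2.0 * math.pi) - math.log(sigma_a)
                - (value - mu_a) ** 2 / (2.0 * sigma_a ** 2))
    kl = (math.log(sigma_b / sigma_a)
          + (sigma_a ** 2 + (mu_a - mu_b) ** 2) / (2.0 * sigma_b ** 2) - 0.5)

    print(entropy)   # ~1.4189385
    print(log_prob)  # ~-1.2389385
    print(kl)        # ~0.3493972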
...
@@ -13674,32 +13674,48 @@ def affine_channel(x,
     Args:
         x (Variable): Feature map input can be a 4D tensor with order NCHW
             or NHWC. It also can be a 2D tensor and the affine transformation
-            is applied in the second dimension.
+            is applied in the second dimension.The data type is float32 or float64.
         scale (Variable): 1D input of shape (C), the c-th element is the scale
             factor of the affine transformation for the c-th channel of
-            the input.
+            the input.The data type is float32 or float64.
         bias (Variable): 1D input of shape (C), the c-th element is the bias
             of the affine transformation for the c-th channel of the input.
-        data_layout (string, default NCHW): NCHW or NHWC. If input is 2D
+            The data type is float32 or float64.
+        data_layout (str, default NCHW): NCHW or NHWC. If input is 2D
             tensor, you can ignore data_layout.
-        name (str, default None): The name of this layer.
+        name (str, default None): The name of this layer. For more information,
+            please refer to :ref:`api_guide_Name` .
         act (str, default None): Activation to be applied to the output of this layer.
     Returns:
-        out (Variable): A tensor of the same shape and data layout with x.
+        Variable: A tensor which has the same shape, data layout and data type with x.
     Examples:
         .. code-block:: python
+            import numpy as np
             import paddle.fluid as fluid
-            data = fluid.layers.data(name='data', shape=[3, 32, 32],
-                                     dtype='float32')
-            input_scale = fluid.layers.create_parameter(shape=[3],
-                                                        dtype="float32")
-            input_bias = fluid.layers.create_parameter(shape=[3],
-                                                       dtype="float32")
+            use_gpu = False
+            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
+            input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
+                                    default_initializer=fluid.initializer.Constant(2.0))
+            input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
+                                    default_initializer=fluid.initializer.Constant(0.5))
             out = fluid.layers.affine_channel(data,scale=input_scale,
                                     bias=input_bias)
+            exe.run(fluid.default_startup_program())
+            test_program = fluid.default_main_program().clone(for_test=True)
+            [out_array] = exe.run(test_program,
+                    fetch_list=out,
+                    feed={'data': np.ones([1,1,2,2]).astype('float32')})
+            # out_array is [[[[2.5, 2.5],
+            #                [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
     """
     helper = LayerHelper("affine_channel", **locals())
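Note: the expected output in the new affine_channel example is easy to verify by hand: with one channel, each element becomes scale * x + bias, i.e. 2.0 * 1.0 + 0.5 = 2.5. A minimal numpy cross-check (not part of the patch):

    import numpy as np

    x = np.ones([1, 1, 2, 2], dtype='float32')   # same input the example feeds
    scale, bias = 2.0, 0.5                       # the Constant initializers used above
    print(scale * x + bias)                      # [[[[2.5, 2.5], [2.5, 2.5]]]]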
...