diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 078b43e4843e45a1bf24462d4333908ac2d7cf2e..e1bb927870b6fc055ea50bc9fe3c4430a8045b9d 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -168,7 +168,7 @@ paddle.fluid.layers.sequence_first_step (ArgSpec(args=['input'], varargs=None, k
 paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '34372f58331247749e8b0a1663cf233b'))
 paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '39fbc5437be389f6c0c769f82fc1fba2'))
 paddle.fluid.layers.dropout (ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer')), ('document', '392dd4bad607fd853f71fec71801044f'))
-paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '78cf3a7323d1a7697658242e13f63759'))
+paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '64073050d3f172d71ace73d7bbb4168e'))
 paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'input_length', 'padding_value', 'name'], varargs=None, keywords=None, defaults=(None, 0, None)), ('document', '31e0cbec2898efae95853034adadfe2b'))
 paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens', 'input_length', 'label_length'], varargs=None, keywords=None, defaults=(True, None, None, None)), ('document', '77cbfb28cd2fc589f589c7013c5086cd'))
 paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', 'c1df110ea65998984f564c5c10abc54a'))
@@ -210,14 +210,14 @@ paddle.fluid.layers.resize_trilinear (ArgSpec(args=['input', 'out_shape', 'scale
 paddle.fluid.layers.resize_nearest (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'data_format'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 'NCHW')), ('document', '0107a5cbae1aef3f381d3d769a6068eb'))
 paddle.fluid.layers.gather (ArgSpec(args=['input', 'index', 'overwrite'], varargs=None, keywords=None, defaults=(True,)), ('document', 'f985c9b66e3aec96fa753a8eb44c991c'))
 paddle.fluid.layers.gather_nd (ArgSpec(args=['input', 'index', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a7d625028525167b138106f574dffdf9'))
-paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name', 'overwrite'], varargs=None, keywords=None, defaults=(None, True)), ('document', '69b22affd4a6326502af166f04c095ab'))
+paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name', 'overwrite'], varargs=None, keywords=None, defaults=(None, True)), ('document', '3f94c3348dc79b7b40a839d31a3eaa84'))
 paddle.fluid.layers.scatter_nd_add (ArgSpec(args=['ref', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2607b5c9369fbc52f208de066a80fc25'))
 paddle.fluid.layers.scatter_nd (ArgSpec(args=['index', 'updates', 'shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e43f1d3a938b35da246aea3e72a020ec'))
 paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'abe3f714120117a5a3d3e639853932bf'))
 paddle.fluid.layers.random_crop (ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)), ('document', '042af0b8abea96b40c22f6e70d99e042'))
 paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', 'dea29c0c3cdbd5b498afef60e58c9d7c'))
 paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0942c174f4f6fb274976d4357356f6a2'))
-paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'f93c61f5b0bf933cd425a64dca2c4fdd'))
+paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '3ee40bc474b4bccdaf112d3f0d847318'))
 paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '02f668664e3bfc4df6c00d7363467140'))
 paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
 paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'd460aaf35afbbeb9beea4789aa6e4343'))
@@ -232,7 +232,7 @@ paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, key
 paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cb417a61f701c937f33d057fe85203ab'))
 paddle.fluid.layers.brelu (ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None)), ('document', '49580538249a52c857fce75c94ad8af7'))
 paddle.fluid.layers.leaky_relu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', '1eb3009c69060299ec87949ee0d4b9ae'))
-paddle.fluid.layers.soft_relu (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None)), ('document', '6455afd2498b00198f53f83d63d6c6a4'))
+paddle.fluid.layers.soft_relu (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None)), ('document', 'f14efa9e5fd2e8b3d976cdda38eff43f'))
 paddle.fluid.layers.flatten (ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '424ff350578992f201f2c5c30959ef89'))
 paddle.fluid.layers.sequence_mask (ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None)), ('document', '6c3f916921b24edaad220f1fcbf039de'))
 paddle.fluid.layers.stack (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', 'a76f347bf27ffe21b990340d5d9524d5'))
@@ -243,7 +243,7 @@ paddle.fluid.layers.unique (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=
 paddle.fluid.layers.unique_with_counts (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=('int32',)), ('document', '4496682f302007019e458a2f30d8a7c3'))
 paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e93a1b102ab64b247c1b774e60d4c0d0'))
 paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f47f9d207ac60b6f294087bcb1b64ae8'))
-paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '463e4713806e5adaa4d20a41e2218453'))
+paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', 'a33547d41970fa3c59e6b2f21fe5f76d'))
 paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '0c9c260e7738165a099f6a76da0b7814'))
 paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '4701ffd4eb4b7ee19756d3b90532c5f2'))
 paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'eab2518a801f3f393cf38fddc899c941'))
@@ -260,7 +260,7 @@ paddle.fluid.layers.gaussian_random_batch_size_like (ArgSpec(args=['input', 'sha
 paddle.fluid.layers.sum (ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None), ('document', 'f4b60847cb0f1ae00823ba6fb1b11310'))
 paddle.fluid.layers.slice (ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None), ('document', '8c622791994a0d657d8c6c9cefa5bf34'))
 paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'ends', 'strides'], varargs=None, keywords=None, defaults=None), ('document', '340d8d656272ea396b441aab848429a2'))
-paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'bf61c8f79d795a8371bdb3b5468aa82b'))
+paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '39534cccdb8e727e287316c7c42e6663'))
 paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'a4492cf0393c6f70e4e25c681dcd73f4'))
 paddle.fluid.layers.size (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'cf2e156beae36378722666c4c33bebfe'))
 paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '12db97c6c459c0f240ec7006737174f2'))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 579ab2f93f56745da76b64441434f8143ae17d85..1058ef5465b26a519e26a13f1f66d462c672f67c 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6334,25 +6334,24 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
 def split(input, num_or_sections, dim=-1, name=None):
     """
-    Split the input tensor into multiple sub-tensors.
+    Split the input tensor into multiple sub-Tensors.
 
     Args:
-        input (Variable): The input variable which is a Tensor or LoDTensor.
-        num_or_sections (int|list): If :attr:`num_or_sections` is an integer,
-            then the integer indicates the number of equal sized sub-tensors
-            that the tensor will be divided into. If :attr:`num_or_sections`
+        input (Variable): The input variable, an N-D Tensor or LoDTensor with data type float32, float64, int32 or int64.
+        num_or_sections (int|list): Integer or list of integers. If :attr:`num_or_sections` is an integer,
+            then the integer indicates the number of equal sized sub-Tensors
+            that the Tensor will be divided into. If :attr:`num_or_sections`
             is a list of integers, the length of list indicates the number of
-            sub-tensors and the integers indicate the sizes of sub-tensors'
-            :attr:`dim` dimension orderly.
+            sub-Tensors and the integers indicate the sizes of sub-Tensors'
+            :attr:`dim` dimension orderly. The length of the list must not be larger than the Tensor's size along :attr:`dim` .
         dim (int): The dimension along which to split. If :math:`dim < 0`, the
             dimension to split along is :math:`rank(input) + dim`.
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        list(Variable): The list of segmented tensor variables.
+        list(Variable): The list of segmented Tensor variables.
 
-    Examples:
+    Example:
         .. code-block:: python
 
             import paddle.fluid as fluid
@@ -6366,7 +6365,7 @@ def split(input, num_or_sections, dim=-1, name=None):
             # x1.shape [-1, 3, 3, 5]
             # x2.shape [-1, 3, 3, 5]
 
-            x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=2)
+            x0, x1, x2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=2)
             # x0.shape [-1, 3, 2, 5]
             # x1.shape [-1, 3, 3, 5]
             # x2.shape [-1, 3, 4, 5]
@@ -10169,39 +10168,71 @@ def scatter(input, index, updates, name=None, overwrite=True):
     """
     **Scatter Layer**
 
-    Output is obtained by updating the input on selected indices on the first
-    axis.
+    Output is obtained by updating the input on selected indices based on updates.
 
-    .. math::
-
-        Out = X
-        Out[Ids] = Updates
+    .. code-block:: python
+
+        import numpy as np
+
+        # input:
+        input = np.array([[1, 1], [2, 2], [3, 3]])
+        index = np.array([2, 1, 0, 1])
+        # the first dimension of updates is the same as the length of index,
+        # and the remaining dimensions of updates are the same as those of input
+        updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
+        overwrite = False
+
+        # calculation:
+        if not overwrite:
+            for i in range(len(index)):
+                input[index[i]] = np.zeros((2))
+
+        for i in range(len(index)):
+            if overwrite:
+                input[index[i]] = updates[i]
+            else:
+                input[index[i]] += updates[i]
+        # output:
+        out = np.array([[3, 3], [6, 6], [1, 1]])
+        out.shape  # [3, 2]
 
     Args:
-        input (Variable): The source input with rank>=1.
-        index (Variable): The index input with rank=1. Its dtype should be
-            int32 or int64 as it is used as indexes.
-        updates (Variable): The updated value of scatter op.
-        name (str|None): The output variable name. Default None.
-        overwrite (bool): The mode that updating the output when has same index.
+        input (Variable): The input N-D Tensor with rank >= 1. Data type can be float32.
+        index (Variable): The index 1-D Tensor. Data type can be int32 or int64. The length of index cannot exceed the length of updates, and the values in index cannot exceed input's first dimension.
+        updates (Variable): The Tensor of updates applied to input according to index. Its first dimension should be the same as the length of index, and its remaining dimensions should be the same as those of input.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
+        overwrite (bool): The mode for updating the output when the same index appears more than once.
             If True, use the overwrite mode to update the output of the same index,
             if False, use the accumulate mode to update the output of the same index.
-            Default value is True.You can set overwrite=False to implement scatter_add.
+            Default value is True.
     Returns:
-        output (Variable): The output is a tensor with the same shape as input.
+        Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.
 
     Examples:
         .. code-block:: python
 
+            import numpy as np
             import paddle.fluid as fluid
 
-            input = fluid.layers.data(name='data', shape=[3, 5, 9], dtype='float32', append_batch_size=False)
-            index = fluid.layers.data(name='index', shape=[3], dtype='int64', append_batch_size=False)
-            updates = fluid.layers.data(name='update', shape=[3, 5, 9], dtype='float32', append_batch_size=False)
+            input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
+            index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
+            updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
+
+            output = fluid.layers.scatter(input, index, updates, overwrite=False)
 
-            output = fluid.layers.scatter(input, index, updates)
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
+            index_data = np.array([2, 1, 0, 1]).astype(np.int64)
+            update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)
+
+            res = exe.run(fluid.default_main_program(), feed={'data': in_data, 'index': index_data, 'update': update_data}, fetch_list=[output])
+            print(res)
+            # [array([[3., 3.],
+            #         [6., 6.],
+            #         [1., 1.]], dtype=float32)]
     """
     helper = LayerHelper('scatter', **locals())
     dtype = helper.input_dtype()
@@ -10521,36 +10552,56 @@ def relu(x, name=None):
     return out
 
 
-@templatedoc()
 def selu(x, scale=None, alpha=None, name=None):
     """
-    ${comment}
+    Selu Operator.
+
+    The equation is:
+
+    .. math::
+
+        selu = \\lambda *
+        \\begin{cases}
+            x                       &\\quad \\text{if } x > 0 \\\\
+            \\alpha * e^x - \\alpha &\\quad \\text{if } x <= 0
+        \\end{cases}
+
+    The input `X` can carry the LoD (Level of Details) information,
+    or not, and the output shares the LoD information with the input `X`.
 
     Args:
-        x (Variable): The input tensor.
-        scale(float, None): If the scale is not set,
+        x (Variable): The input N-D Tensor.
+        scale(float, optional): lambda in the selu activation function,
             the default value is 1.0507009873554804934193349852946. For more
             information about this value, please refer to: https://arxiv.org/abs/1706.02515.
-        alpha(float, None): If the alpha is not set,
+        alpha(float, optional): alpha in the selu activation function,
            the default value is 1.6732632423543772848170429916717. For more
           information about this value, please refer to: https://arxiv.org/abs/1706.02515.
-        name (str|None, default None): A name for this layer If set None,
-            the layer will be named automatically.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        Variable: The output tensor with the same shape as input.
+        Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as the input.
 
     Examples:
        .. code-block:: python
 
           import paddle.fluid as fluid
-
-          input = fluid.layers.data(
-               name="input", shape=[3, 9, 5], dtype="float32")
-          output = fluid.layers.selu(input)
+          import numpy as np
+
+          inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
+          output = fluid.layers.selu(inputs)
+
+          exe = fluid.Executor(fluid.CPUPlace())
+          exe.run(fluid.default_startup_program())
+
+          img = np.array([[0, 1], [2, 3]]).astype(np.float32)
+
+          res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
+          print(res)  # [array([[0. , 1.050701], [2.101402, 3.152103]], dtype=float32)]
    """
    helper = LayerHelper('selu', **locals())
    dtype = helper.input_dtype(input_param_name='x')
@@ -11618,26 +11669,37 @@ def leaky_relu(x, alpha=0.02, name=None):
     return out
 
 
-@templatedoc()
 def soft_relu(x, threshold=40.0, name=None):
     """
-    ${comment}
+    SoftRelu Activation Operator.
+
+    .. math::
+
+        out = \\ln(1 + \\exp(\\max(\\min(x, threshold), -threshold)))
 
     Args:
-        x(${x_type}): ${x_comment}
-        threshold(${threshold_type}|40.0): ${threshold_comment}
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        x(Variable): Input of the soft_relu operator. Data type can be float32 or float64.
+        threshold(float, optional): The threshold value of soft_relu. Default value is 40.0.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        output(${out_type}): ${out_comment}
+        Variable(Tensor|LoDTensor): Output of the soft_relu operator, with the same shape and LoD as the input.
 
     Examples:
        .. code-block:: python
 
           import paddle.fluid as fluid
-
-          x = fluid.layers.data(name="x", shape=[3,16,16], dtype="float32")
-          y = fluid.layers.soft_relu(x, threshold=20.0)
+          import numpy as np
+
+          inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
+          output = fluid.layers.soft_relu(inputs, threshold=20.0)
+
+          exe = fluid.Executor(fluid.CPUPlace())
+          exe.run(fluid.default_startup_program())
+
+          img = np.array([[0, 1], [2, 3]]).astype(np.float32)
+
+          res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
+          print(res)  # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
    """
    helper = LayerHelper('soft_relu', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -12771,19 +12833,27 @@ def shape(input):
     Get the shape of the input.
 
     Args:
-        input (Variable): The input variable.
+        input (Variable): The input N-D Tensor. Data type can be float32, float64, int32 or int64.
 
     Returns:
-        Variable: The shape of the input variable.
+        Variable (Tensor): The shape of the input variable, a 1-D Tensor with data type int32.
 
     Examples:
        .. code-block:: python
 
           import paddle.fluid as fluid
+          import numpy as np
 
-          input = fluid.layers.data(
-              name="input", shape=[3, 100, 100], dtype="float32")
-          out = fluid.layers.shape(input)
+          inputs = fluid.layers.data(name="x", shape=[3, 100, 100], dtype="float32")
+          output = fluid.layers.shape(inputs)
+
+          exe = fluid.Executor(fluid.CPUPlace())
+          exe.run(fluid.default_startup_program())
+
+          img = np.ones((3, 100, 100)).astype(np.float32)
+
+          res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
+          print(res)  # [array([  3, 100, 100], dtype=int32)]
     """
     helper = LayerHelper('shape', **locals())
@@ -12906,29 +12976,49 @@ def _elementwise_op(helper):
     return helper.append_activation(out)
 
 
-@templatedoc()
 def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
-    ${comment}
+    Scale operator.
+
+    Apply scale and bias to the input Tensor as follows:
+
+    ``bias_after_scale`` is True:
+
+    .. math::
+
+        Out = scale * X + bias
+
+    ``bias_after_scale`` is False:
+
+    .. math::
+
+        Out = scale * (X + bias)
 
     Args:
-        x(${x_type}): ${x_comment}
-        scale(${scale_type}): ${scale_comment}
-        bias(${bias_type}): ${bias_comment}
-        bias_after_scale(${bias_after_scale_type}): ${bias_after_scale_comment}
-        act(basestring|None): Activation applied to the output.
-        name(basestring|None): Name of the output.
+        x(Variable): Input N-D Tensor of the scale operator. Data type can be float32, float64, int8, int16, int32, int64 or uint8.
+        scale(float): The scale factor applied to the input.
+        bias(float): The bias added to the input.
+        bias_after_scale(bool): Whether to add the bias after scaling (True) or before scaling (False). This is useful for numerical stability in some circumstances.
+        act(str, optional): Activation applied to the output, such as tanh, softmax, sigmoid or relu.
+        name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name` .
 
     Returns:
-        out(${out_type}): ${out_comment}
+        Variable(Tensor|LoDTensor): Output Tensor of the scale operator, with the same shape and data type as the input.
 
     Examples:
         .. code-block:: python
 
            import paddle.fluid as fluid
+           import numpy as np
+
+           inputs = fluid.layers.data(name="x", shape=[2, 3], dtype='float32')
+           output = fluid.layers.scale(inputs, scale=2.0, bias=1.0)
+
+           exe = fluid.Executor(fluid.CPUPlace())
+           exe.run(fluid.default_startup_program())
+
+           img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
 
-           x = fluid.layers.data(name="X", shape=[1, 2, 5, 5], dtype='float32')
-           y = fluid.layers.scale(x, scale = 2.0, bias = 1.0)
+           res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
+           print(res)  # [array([[ 3.,  5.,  7.], [ 9., 11., 13.]], dtype=float32)]
     """
     helper = LayerHelper('scale', **locals())
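
Note: the accumulate-mode result quoted in the new `scatter` docstring can be sanity-checked without building Paddle. The snippet below is a minimal NumPy-only sketch of the calculation described in that docstring, using the same values as its example; the helper name `scatter_numpy` is illustrative only and is not part of this patch:

    import numpy as np

    def scatter_numpy(x, index, updates, overwrite=False):
        # Reference of the behaviour described in the scatter docstring.
        out = x.copy()
        if not overwrite:
            # accumulate mode: rows named in index are first zeroed, then summed
            out[index] = 0
            for i in range(len(index)):
                out[index[i]] += updates[i]
        else:
            # overwrite mode: the last update written to a given index wins
            for i in range(len(index)):
                out[index[i]] = updates[i]
        return out

    x = np.array([[1, 1], [2, 2], [3, 3]], dtype=np.float32)
    index = np.array([2, 1, 0, 1])
    updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=np.float32)

    print(scatter_numpy(x, index, updates, overwrite=False))
    # [[3. 3.]
    #  [6. 6.]
    #  [1. 1.]]  -- matches the output quoted in the docstring example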