diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 6e7fdea742d4bb8a5e8c55cdb6ab870565c34e21..143b29ab7c267a779971def2ebe86ccafae94b4e 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -170,7 +170,7 @@ paddle.fluid.layers.pow (ArgSpec(args=['x', 'factor', 'name'], varargs=None, key
 paddle.fluid.layers.stanh (ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], varargs=None, keywords=None, defaults=(0.6666666666666666, 1.7159, None)), ('document', '959936a477efc6c1447a9c8bf8ce94bb'))
 paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None)), ('document', '607d79ca873bee40eed1c79a96611591'))
 paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'ef745e55a48763ee7b46b21a81dc7e84'))
-paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f6acef7ff7d887e49ff499fbb1dad4a9'))
+paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '2da40e447716338affebfe058d05d9a9'))
 paddle.fluid.layers.brelu (ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None)), ('document', '3db337c195e156e6ef2b8b4a57113600'))
 paddle.fluid.layers.leaky_relu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', 'f878486c82b576938151daad0de995a0'))
 paddle.fluid.layers.soft_relu (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None)), ('document', '3490ed5c9835ae039a82979daf3918a4'))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 8d9343b924550964a920bfde28adce2663c57618..48a5f5af72efbe473cf1cbddcba8f896d2d8107b 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -8830,14 +8830,19 @@ def prelu(x, mode, param_attr=None, name=None):
     .. math::
         y = \max(0, x) + \\alpha * \min(0, x)
 
+    There are three modes for the activation:
+
+    .. code-block:: text
+
+        all: All elements share the same alpha.
+        channel: Elements in the same channel share the same alpha.
+        element: No sharing; each element has its own alpha.
+
     Args:
         x (Variable): The input tensor.
+        mode (string): The mode for weight sharing.
         param_attr(ParamAttr|None): The parameter attribute for the learnable
-          weight (alpha).
-        mode (string): The mode for weight sharing. It supports all, channel
-          and element. all: all elements share same weight
-          channel:elements in a channel share same weight
-          element:each element has a weight
+          weight (alpha). It can be created by ParamAttr.
         name(str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
 
@@ -8848,9 +8853,13 @@ def prelu(x, mode, param_attr=None, name=None):
 
         .. code-block:: python
 
-            x = fluid.layers.data(name="x", shape=[10,10], dtype="float32")
+            import paddle.fluid as fluid
+            from paddle.fluid.param_attr import ParamAttr
+            x = fluid.layers.data(name="x", shape=[5,10,10], dtype="float32")
             mode = 'channel'
-            output = fluid.layers.prelu(x,mode)
+            output = fluid.layers.prelu(
+                x, mode, param_attr=ParamAttr(name='alpha'))
+
     """
     helper = LayerHelper('prelu', **locals())
     if mode not in ['all', 'channel', 'element']:
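
For reference, below is a minimal usage sketch (not part of the patch) exercising the three prelu weight-sharing modes documented in the updated docstring. It assumes the legacy static-graph fluid 1.x API that this diff targets; the parameter names alpha_all / alpha_channel / alpha_element are illustrative only.

    import paddle.fluid as fluid
    from paddle.fluid.param_attr import ParamAttr

    # Input with shape [channels, height, width]; fluid.layers.data prepends the batch dim.
    x = fluid.layers.data(name="x", shape=[5, 10, 10], dtype="float32")

    # 'all': a single learnable alpha shared by every element of the input.
    y_all = fluid.layers.prelu(x, 'all', param_attr=ParamAttr(name='alpha_all'))

    # 'channel': one learnable alpha per channel.
    y_channel = fluid.layers.prelu(x, 'channel', param_attr=ParamAttr(name='alpha_channel'))

    # 'element': an independent learnable alpha for each element.
    y_element = fluid.layers.prelu(x, 'element', param_attr=ParamAttr(name='alpha_element'))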