From 2057df7ac0865d3771af624b6572851c6b8c50e7 Mon Sep 17 00:00:00 2001
From: Feiyu Chan
Date: Fri, 6 Dec 2019 13:56:59 +0800
Subject: [PATCH] add fluid.layers.gelu & doc (#21515)

Add a Python interface for GeLU.
Add documentation for fluid.layers.gelu.
---
 python/paddle/fluid/layers/ops.py                  | 79 +++++++++++++++++++
 .../fluid/tests/unittests/test_gelu_op.py          | 54 +++++++++++++
 2 files changed, 133 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/test_gelu_op.py

diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 37784805f0..072b95969c 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -239,3 +239,82 @@ Examples:
 # array([[ 0.21134382, -0.        ,  0.32876605],
 #        [-0.        , -0.        ,  1.0013918 ]], dtype=float32)
 """
+
+__all__ += ['gelu']
+
+_gelu_ = generate_layer_fn('gelu')
+
+
+def gelu(x):
+    locals_var = locals().copy()
+    kwargs = dict()
+    for name, val in locals_var.items():
+        if val is not None:
+            kwargs[name] = val
+    return _gelu_(**kwargs)
+
+
+gelu.__doc__ = """
+:strong:`GeLU Activation Operator`
+For more details, see `Gaussian Error Linear Units <https://arxiv.org/abs/1606.08415>`_.
+
+Equation:
+    .. math::
+        out = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}}))
+
+Args:
+
+    x (Variable): The input of the GeLU op, a Tensor or LoDTensor with dtype float32 or float64.
+
+Returns:
+
+    Variable: The output of the GeLU op, a Tensor or LoDTensor with the same shape and dtype as the input.
+
+Examples:
+
+    .. code-block:: python
+
+        # declarative mode
+        import numpy as np
+        from paddle import fluid
+
+        x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
+        y = fluid.layers.gelu(x)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        start = fluid.default_startup_program()
+        main = fluid.default_main_program()
+
+        data = np.random.randn(2, 3).astype("float32")
+        exe.run(start)
+
+        y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
+
+        data
+        # array([[ 0.87165993, -1.0541513 , -0.37214822],
+        #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
+        y_np
+        # array([[ 0.70456535, -0.15380788, -0.13207214],
+        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
+
+    .. code-block:: python
+
+        # imperative mode
+        import numpy as np
+        from paddle import fluid
+        import paddle.fluid.dygraph as dg
+
+        data = np.random.randn(2, 3).astype("float32")
+        place = fluid.CPUPlace()
+        with dg.guard(place) as g:
+            x = dg.to_variable(data)
+            y = fluid.layers.gelu(x)
+            y_np = y.numpy()
+        data
+        # array([[ 0.87165993, -1.0541513 , -0.37214822],
+        #        [ 0.15647964,  0.32496083,  0.33045998]], dtype=float32)
+        y_np
+        # array([[ 0.70456535, -0.15380788, -0.13207214],
+        #        [ 0.08796856,  0.20387867,  0.2080159 ]], dtype=float32)
+"""
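The `gelu` wrapper above simply forwards its non-None arguments to the operator created by `generate_layer_fn('gelu')`, so the numerical behavior is entirely the erf-based equation in the docstring. That equation can be checked against the example outputs with plain NumPy/SciPy, independently of Paddle. The snippet below is a minimal reference sketch, not part of the patch: `gelu_ref` is an illustrative name, and only `numpy` and `scipy.special.erf` (which the unit test below also imports) are assumed.

    # Minimal reference sketch of the documented equation
    # out = 0.5 * x * (1 + erf(x / sqrt(2))). Illustrative only.
    import numpy as np
    from scipy.special import erf

    def gelu_ref(x):
        # Elementwise GeLU via the Gaussian error function.
        return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

    # The input values shown in the docstring examples.
    x = np.array([[0.87165993, -1.0541513, -0.37214822],
                  [0.15647964, 0.32496083, 0.33045998]], dtype=np.float32)
    print(gelu_ref(x))
    # Expected to be close to the y_np values in the docstring examples,
    # e.g. gelu_ref(0.87165993) ~= 0.70456535.
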
diff --git a/python/paddle/fluid/tests/unittests/test_gelu_op.py b/python/paddle/fluid/tests/unittests/test_gelu_op.py
new file mode 100644
index 0000000000..5f722ab8e0
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_gelu_op.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from scipy.special import erf
+import paddle.fluid as fluid
+import paddle.fluid.dygraph as dg
+
+
+class TestGeluOp(unittest.TestCase):
+    def _test_case1_cpu(self):
+        x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32)
+        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
+
+        place = fluid.CPUPlace()
+        with dg.guard(place) as g:
+            x_var = dg.to_variable(x)
+            y_var = fluid.layers.gelu(x_var)
+            y_test = y_var.numpy()
+        self.assertTrue(np.allclose(y_ref, y_test, rtol=1e-05, atol=1e-08))
+
+    def _test_case1_gpu(self):
+        x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32)
+        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
+
+        place = fluid.CUDAPlace(0)
+        with dg.guard(place) as g:
+            x_var = dg.to_variable(x)
+            y_var = fluid.layers.gelu(x_var)
+            y_test = y_var.numpy()
+        self.assertTrue(np.allclose(y_ref, y_test, rtol=1e-05, atol=1e-08))
+
+    def test_cases(self):
+        self._test_case1_cpu()
+        if fluid.is_compiled_with_cuda():
+            self._test_case1_gpu()
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab
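The docstring advertises both float32 and float64 inputs, while the unit test only exercises float32. A float64 case would follow the same pattern; the sketch below is illustrative rather than part of the patch (the helper name `check_gelu_fp64` is hypothetical), and it uses only APIs the test itself already imports.

    # Hypothetical float64 companion check, mirroring _test_case1_cpu.
    import numpy as np
    from scipy.special import erf
    import paddle.fluid as fluid
    import paddle.fluid.dygraph as dg

    def check_gelu_fp64():
        # Same reference formula as the test, but with double precision input.
        x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))

        # Run the op eagerly on CPU and compare against the reference.
        with dg.guard(fluid.CPUPlace()):
            y_test = fluid.layers.gelu(dg.to_variable(x)).numpy()
        assert np.allclose(y_ref, y_test, rtol=1e-05, atol=1e-08)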