From 73f421f7820adc31ba899d53dcefe29f638a0bb8 Mon Sep 17 00:00:00 2001
From: guofei <52460041+gfwm2013@users.noreply.github.com>
Date: Mon, 13 Apr 2020 13:32:43 +0800
Subject: [PATCH] Add new API : randn (#23211)

* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* aAdd new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
* Add new API : randn test=develop
---
 python/paddle/__init__.py                          |   2 +-
 .../fluid/tests/unittests/test_randn_op.py         | 109 +++++++++++++++++
 python/paddle/tensor/__init__.py                   |   2 +-
 python/paddle/tensor/random.py                     | 111 +++++++++++++++++-
 python/setup.py.in                                 |   1 +
 5 files changed, 222 insertions(+), 3 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_randn_op.py

diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 9e2bf8c5ee..7a9101f101 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -87,7 +87,7 @@ from .tensor.logic import elementwise_equal  #DEFINE_ALIAS
 # from .tensor.random import gaussin  #DEFINE_ALIAS
 # from .tensor.random import uniform  #DEFINE_ALIAS
 # from .tensor.random import shuffle  #DEFINE_ALIAS
-# from .tensor.random import randn  #DEFINE_ALIAS
+from .tensor.random import randn  #DEFINE_ALIAS
 from .tensor.random import randperm
 # from .tensor.random import rand  #DEFINE_ALIAS
 from .tensor.random import randint  #DEFINE_ALIAS
diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py
new file mode 100644
index 0000000000..808e5a08fd
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_randn_op.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid import Program, program_guard
+
+
+class TestRandnOp(unittest.TestCase):
+    def test_api(self):
+        x1 = paddle.randn(shape=[1000, 784], dtype='float32')
+        x2 = paddle.randn(shape=[1000, 784], dtype='float64')
+        x3 = fluid.layers.fill_constant(
+            shape=[1000, 784], dtype='float32', value=0)
+        paddle.randn(shape=[1000, 784], out=x3, dtype='float32')
+        x4 = paddle.randn(shape=[1000, 784], dtype='float32', device='cpu')
+        x5 = paddle.randn(shape=[1000, 784], dtype='float32', device='gpu')
+        x6 = paddle.randn(
+            shape=[1000, 784],
+            dtype='float32',
+            device='gpu',
+            stop_gradient=False)
+
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        res = exe.run(fluid.default_main_program(),
+                      feed={},
+                      fetch_list=[x1, x2, x3, x4, x5, x6])
+
+        self.assertAlmostEqual(np.mean(res[0]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[0]), 1., delta=0.1)
+        self.assertAlmostEqual(np.mean(res[1]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[1]), 1., delta=0.1)
+        self.assertAlmostEqual(np.mean(res[2]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[2]), 1., delta=0.1)
+        self.assertAlmostEqual(np.mean(res[3]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[3]), 1., delta=0.1)
+        self.assertAlmostEqual(np.mean(res[4]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[4]), 1., delta=0.1)
+        self.assertAlmostEqual(np.mean(res[5]), .0, delta=0.1)
+        self.assertAlmostEqual(np.std(res[5]), 1., delta=0.1)
+
+
+class TestRandnOpError(unittest.TestCase):
+    def test_error(self):
+        with program_guard(Program(), Program()):
+
+            # The argument shape's size of randn_op should not be 0.
+            def test_shape_size():
+                out = paddle.randn(shape=[])
+
+            self.assertRaises(AssertionError, test_shape_size)
+
+            # The argument shape's type of randn_op should be list or tuple.
+            def test_shape_type():
+                out = paddle.randn(shape=1)
+
+            self.assertRaises(TypeError, test_shape_type)
+
+            # The argument dtype of randn_op should be float32 or float64.
+            def test_dtype_float16():
+                out = paddle.randn(shape=[1, 2], dtype='float16')
+
+            self.assertRaises(TypeError, test_dtype_float16)
+
+            # The argument dtype of randn_op should be float32 or float64.
+            def test_dtype_int32():
+                out = paddle.randn(shape=[1, 2], dtype='int32')
+
+            self.assertRaises(TypeError, test_dtype_int32)
+
+            # The argument dtype of randn_op should be float32 or float64.
+            def test_dtype_int64():
+                out = paddle.randn(shape=[1, 2], dtype='int64')
+
+            self.assertRaises(TypeError, test_dtype_int64)
+
+            # The argument dtype of randn_op should be float32 or float64.
+            def test_dtype_uint8():
+                out = paddle.randn(shape=[1, 2], dtype='uint8')
+
+            self.assertRaises(TypeError, test_dtype_uint8)
+
+            # The argument dtype of randn_op should be float32 or float64.
+            def test_dtype_bool():
+                out = paddle.randn(shape=[1, 2], dtype='bool')
+
+            self.assertRaises(TypeError, test_dtype_bool)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 0f1accd51a..e9cd06942f 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -62,7 +62,7 @@ from .logic import elementwise_equal  #DEFINE_ALIAS
 # from .random import gaussin  #DEFINE_ALIAS
 # from .random import uniform  #DEFINE_ALIAS
 # from .random import shuffle  #DEFINE_ALIAS
-# from .random import randn  #DEFINE_ALIAS
+from .random import randn  #DEFINE_ALIAS
 # from .random import rand  #DEFINE_ALIAS
 from .random import randint  #DEFINE_ALIAS
 from .random import randperm
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index f119e0a983..6a1cd6cc01 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -21,8 +21,10 @@
 #            'rand',
 #            'randint']
 
+import numpy as np
+
 from ..fluid import core
-from ..fluid.framework import device_guard, in_dygraph_mode, _varbase_creator, Variable
+from ..fluid.framework import device_guard, in_dygraph_mode, _varbase_creator, Variable, convert_np_dtype_to_dtype_
 from ..fluid.layers.layer_function_generator import templatedoc
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
@@ -191,6 +193,113 @@ def randint(low,
     return out
 
+
+def randn(shape,
+          out=None,
+          dtype=None,
+          device=None,
+          stop_gradient=True,
+          name=None):
+    """
+    This function returns a tensor filled with random numbers from a normal
+    distribution with mean 0 and variance 1 (also called the standard normal
+    distribution).
+
+    Args:
+        shape(list|tuple): Shape of the generated random tensor.
+        out(Variable, optional): Optional output which can be any created Variable
+            that meets the requirements to store the result of the operation. If
+            out is `None`, a new Variable will be created to store the result.
+            Default is None.
+        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output
+            tensor, which can be float32 or float64. If dtype is `None`, the data
+            type of the output tensor is `float32`.
+            Default is None.
+        device(str, optional): Specify the device on which the output variable is
+            saved. Supported values: None, 'cpu', 'gpu'. If it is None, the output
+            variable is assigned a device automatically.
+            Default: None.
+        stop_gradient(bool, optional): Indicating whether to stop the gradient on
+            the current (out) Variable. Default is True.
+        name(str, optional): Normally there is no need for user to set this property.
+            For more information, please refer to :ref:`api_guide_Name` .
+            Default is None.
+
+    Returns:
+        Random tensor whose data is drawn from a standard normal (Gaussian)
+        distribution, with dtype float32 or float64 as specified.
+
+    Return type:
+        Variable
+
+    Raises:
+        TypeError: If the type of `shape` is not list or tuple.
+        TypeError: If the data type of `dtype` is not float32 or float64.
+        AssertionError: If the length of `shape` is not greater than 0.
+
+    Examples:
+        .. code-block:: python
+
+            # declarative mode
+            import paddle
+            import paddle.fluid as fluid
+
+            data = paddle.randn([2, 4])
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            res, = exe.run(fluid.default_main_program(), feed={}, fetch_list=[data])
+            print(res)
+            # [[-1.4187592   0.7368311  -0.53748125 -0.0146909 ]
+            #  [-0.66294265 -1.3090698   0.1898754  -0.14065823]]
+
+        .. code-block:: python
+
+            # imperative mode
+            import paddle
+            import paddle.fluid as fluid
+            import paddle.fluid.dygraph as dg
+
+            place = fluid.CPUPlace()
+            with dg.guard(place) as g:
+                x = paddle.randn([2, 4])
+                x_np = x.numpy()
+                print(x_np)
+                # [[ 1.5149173  -0.26234224 -0.592486    1.4523455 ]
+                #  [ 0.04581212 -0.85345626  1.1687907  -0.02512913]]
+    """
+    helper = LayerHelper("randn", **locals())
+    check_type(shape, 'shape', (list, tuple), 'randn')
+    assert len(shape) > 0, ("The size of argument(shape) can't be zero.")
+
+    if dtype is None:
+        dtype = 'float32'
+
+    check_dtype(dtype, 'create data type', ['float32', 'float64'], 'randn')
+
+    if out is None:
+        out = helper.create_variable_for_type_inference(dtype=dtype)
+    else:
+        check_variable_and_dtype(out, 'out', [dtype], 'randn')
+
+    out.stop_gradient = stop_gradient
+
+    dtype = convert_np_dtype_to_dtype_(dtype)
+    seed = np.random.randint(0, 100)
+
+    with device_guard(device):
+        helper.append_op(
+            type='gaussian_random',
+            outputs={'Out': out},
+            attrs={
+                'shape': shape,
+                'mean': 0.0,
+                'std': 1.0,
+                'seed': seed,
+                'dtype': dtype,
+                'use_mkldnn': False
+            })
+    return out
+
+
 @templatedoc()
 def randperm(n,
              out=None,
diff --git a/python/setup.py.in b/python/setup.py.in
index e5acc278e1..2921b4ff4c 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -113,6 +113,7 @@ packages=['paddle',
           'paddle.fluid',
           'paddle.tensor',
           'paddle.fluid.dygraph',
+          'paddle.tensor',
          'paddle.fluid.dygraph.dygraph_to_static',
          'paddle.fluid.proto',
          'paddle.fluid.proto.profiler',
-- 
GitLab
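
A minimal usage sketch of the `paddle.randn` API introduced by this patch, following the declarative-mode example in the docstring above; the shape, dtype, variable names, and the tolerance checks below are illustrative assumptions rather than part of the patch:

.. code-block:: python

    # Build a program that draws a 1000x784 float64 tensor from the standard
    # normal distribution with the new paddle.randn API, then run it once.
    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.core as core

    out = paddle.randn(shape=[1000, 784], dtype='float64')

    place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
    exe = fluid.Executor(place)
    result, = exe.run(fluid.default_main_program(), feed={}, fetch_list=[out])

    # With roughly 784k samples, the empirical mean and std should be close
    # to 0 and 1, mirroring the checks in test_randn_op.py.
    print(result.shape)                      # (1000, 784)
    print(abs(np.mean(result)) < 0.1)        # True
    print(abs(np.std(result) - 1.0) < 0.1)   # True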