Unverified commit 614eb942, authored by Xing Wu, committed by GitHub

upload code for tensor.rand (#23507)

* upload code for tensor.rand

* fix import

* update example, change paddle.tensor.rand to paddle.rand

* change 'variable' to 'Variable',  test=develop

change 'variable' to 'Variable' in description,  test=develop

* add pre-commit check

* add pre-commit check

* pre-commit check, test=develop

* add more unittest code

* trigger ci, test=develop

* pre-commit check, test=develop

* update api and test comment, test=develop

* update api and test comment, test=develop

* add more type check, test=develop

* add detail error info for device, test=develop

* add unnittest, test=develop

* resolve conflict and pre-commit check, test=develop
Parent commit: c6c65c65
......@@ -91,7 +91,7 @@ from .tensor.logic import elementwise_equal #DEFINE_ALIAS
# from .tensor.random import shuffle #DEFINE_ALIAS
from .tensor.random import randn #DEFINE_ALIAS
from .tensor.random import randperm
# from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import randint #DEFINE_ALIAS
# from .tensor.math import abs #DEFINE_ALIAS
# from .tensor.math import acos #DEFINE_ALIAS
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle import rand
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
class TestRandOpError(unittest.TestCase):
    """Check that ``paddle.rand`` rejects invalid arguments.

    Each helper below performs one invalid call; the surrounding
    ``assertRaises`` pins the exception type the API must raise.
    """

    def test_errors(self):
        main_prog = Program()
        start_prog = Program()
        with program_guard(main_prog, start_prog):

            def call_with_lod_tensor():
                # A LoDTensor is not a Variable/list/tuple, so using it
                # as ``shape`` must raise TypeError.
                bad_shape = fluid.create_lod_tensor(
                    np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
                rand(bad_shape)

            self.assertRaises(TypeError, call_with_lod_tensor)

            def call_with_int_dtype():
                # Output dtype is restricted to float32/float64.
                dim_1 = fluid.layers.fill_constant([1], "int64", 3)
                dim_2 = fluid.layers.fill_constant([1], "int32", 5)
                rand(shape=[dim_1, dim_2], dtype='int32')

            self.assertRaises(TypeError, call_with_int_dtype)

            def call_with_float_dim():
                # Shape elements must be integers, not floats.
                rand(shape=[2.])

            self.assertRaises(TypeError, call_with_float_dim)

            def call_with_mixed_float_dim():
                # A single float among valid ints is still rejected.
                rand(shape=[2, 3.])

            self.assertRaises(TypeError, call_with_mixed_float_dim)

            def call_with_bad_device():
                # Only None, 'cpu' and 'gpu' are legal device strings.
                rand(shape=[3, 4], device='device')

            self.assertRaises(ValueError, call_with_bad_device)
class TestRandOp(unittest.TestCase):
    """Exercise the supported ``shape`` forms of ``paddle.rand`` in static mode.

    Covers: a plain int list, a list mixing int64/int32 scalar Tensors,
    an int64 1-D shape Tensor, and an int32 1-D shape Tensor.
    """

    def test_run(self):
        use_cuda = False
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)

        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # shape given as a plain python list
            result_1 = rand(shape=[3, 4])

            # shape given as a list of scalar Tensors (mixed int dtypes)
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            result_2 = rand(shape=[dim_1, dim_2])

            # shape given as a fed 1-D int64 Tensor
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = rand(var_shape)

            # shape given as a fed 1-D int32 Tensor
            var_shape_int32 = fluid.data(
                name='var_shape_int32', shape=[2], dtype="int32")
            result_4 = rand(var_shape_int32)

        exe.run(startup_program)

        shape_feed_int64 = np.array([3, 2]).astype('int64')
        shape_feed_int32 = np.array([4, 3]).astype('int32')
        exe.run(train_program,
                feed={
                    "var_shape": shape_feed_int64,
                    "var_shape_int32": shape_feed_int32
                },
                fetch_list=[result_1, result_2, result_3, result_4])
class TestRandOpForDygraph(unittest.TestCase):
    """Exercise the supported ``shape`` forms of ``paddle.rand`` in dygraph mode.

    Mirrors TestRandOp: plain int list, list of scalar Tensors, and a
    1-D shape Tensor. Calls only need to run without raising.
    """

    def test_run(self):
        # Fix: dropped the unused ``use_cuda`` local — the dygraph guard
        # selects the default place, so the flag had no effect.
        with fluid.dygraph.guard():
            # shape given as a plain python list
            rand(shape=[3, 4])

            # shape given as a list of scalar Tensors (mixed int dtypes)
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            rand(shape=[dim_1, dim_2])

            # shape given as a 1-D shape Tensor
            var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
            rand(var_shape)
# Standard test-module entry point: run all TestCase classes above.
if __name__ == "__main__":
    unittest.main()
......@@ -69,7 +69,7 @@ from .logic import elementwise_equal #DEFINE_ALIAS
# from .random import uniform #DEFINE_ALIAS
# from .random import shuffle #DEFINE_ALIAS
from .random import randn #DEFINE_ALIAS
# from .random import rand #DEFINE_ALIAS
from .random import rand #DEFINE_ALIAS
from .random import randint #DEFINE_ALIAS
from .random import randperm
# from .math import abs #DEFINE_ALIAS
......
......@@ -28,10 +28,10 @@ from ..fluid.framework import device_guard, in_dygraph_mode, _varbase_creator, V
from ..fluid.layers.layer_function_generator import templatedoc
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers import utils
from ..fluid.layers import uniform_random, utils
from ..fluid.layers.tensor import fill_constant
__all__ = ['randperm', 'randn', 'randint']
__all__ = ['randperm', 'randn', 'randint', 'rand']
def randint(low,
......@@ -391,3 +391,94 @@ def randperm(n,
helper.append_op(
type='randperm', inputs=inputs, outputs=outputs, attrs=attrs)
return out
def rand(shape, out=None, dtype=None, device=None, stop_gradient=True):
    """
    This OP initializes a Variable with random values sampled from a
    uniform distribution in the range [0, 1).

    Examples:
        ::

            Input:
              shape = [1, 2]

            Output:
              result=[[0.8505902, 0.8397286]]

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
            The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
            the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Variable, it should be an 1-D Tensor .
        out(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output tensor
            which can be float32, float64. If dtype is `None`, the data
            type of the created tensor is `float32`.
        device(str, optional): This parameter specifies that the Tensor is created
            on the GPU or CPU.
        stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
            default value is True.

    Returns:
        Variable: A Tensor of the specified shape filled with random numbers from a uniform distribution on the interval [0, 1).

    Raises:
        TypeError: The shape type should be list or tuple or Variable.
        ValueError: The device is not one of None, 'cpu', 'gpu'.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            # example 1:
            # attr shape is a list which doesn't contain tensor Variable.
            result_1 = paddle.rand(shape=[3, 4])

            # example 2:
            # attr shape is a list which contains tensor Variable.
            dim_1 = fluid.layers.fill_constant([1],"int64",3)
            dim_2 = fluid.layers.fill_constant([1],"int32",5)
            result_2 = paddle.rand(shape=[dim_1, dim_2])

            # example 3:
            # attr shape is a Variable, the data type must be int64 or int32.
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = paddle.rand(var_shape)
            var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
            result_4 = paddle.rand(var_shape_int32)
    """
    if dtype is None:
        dtype = 'float32'
    check_dtype(dtype, 'dtype', ['float32', 'float64'], 'rand')

    # Validate shape: either a 1-D int Tensor, or a list/tuple whose
    # elements are plain ints or scalar int Tensors.
    check_type(shape, 'shape', (Variable, list, tuple), 'rand')
    if isinstance(shape, Variable):
        check_variable_and_dtype(shape, 'shape', ['int32', 'int64'], 'rand')
    elif isinstance(shape, (list, tuple)):
        for i, _shape in enumerate(shape):
            if not isinstance(_shape, Variable):
                check_type(_shape, '_shape', (int), 'rand')
            else:
                check_variable_and_dtype(_shape, 'shape[' + str(i) + ']',
                                         ['int32', 'int64'], 'rand')

    if device not in [None, 'cpu', 'gpu']:
        raise ValueError(
            "The input device should in [None, 'cpu', 'gpu'], but received {}".
            format(device))

    helper = LayerHelper("rand", **locals())
    with device_guard(device):
        result = uniform_random(shape, dtype, min=0., max=1.0)

    if out is None:
        out = result
    else:
        # BUG FIX: the original validated ``out`` and then rebound the name
        # to uniform_random's fresh output, silently discarding the
        # caller-provided Variable. Copy the sampled values into it instead.
        check_variable_and_dtype(out, 'out', [dtype], 'rand')
        helper.append_op(
            type='assign', inputs={'X': [result]}, outputs={'Out': [out]})

    # BUG FIX: set stop_gradient on the Variable actually returned; the
    # original set it before the rebinding above, so it never took effect.
    out.stop_gradient = stop_gradient
    return out
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please sign up.