Unverified commit 5b573c58, authored by zhupengyang, committed by GitHub

randperm API: remove out, device, stop_gradient; add name (#25410)

Parent ccb98cde
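In short, call sites drop `out`, `device`, `stop_gradient`, and `seed`, and may pass `name` instead; a minimal before/after sketch (assuming the imperative mode exercised by the updated tests below):

```python
import paddle

paddle.enable_imperative()

# Before this change the op accepted extra arguments, e.g.:
#   perm = paddle.randperm(6, out=None, dtype="int64", device="cpu",
#                          stop_gradient=True, seed=0)
# After it, only n, dtype, and name remain:
perm = paddle.randperm(6, dtype="int64")
print(perm.numpy())  # a random permutation of 0..5
```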
......
@@ -92,4 +92,5 @@ template <typename T>
 using kernel =
     paddle::operators::RandpermKernel<paddle::platform::CPUDeviceContext, T>;
-REGISTER_OP_CPU_KERNEL(randperm, kernel<int64_t>, kernel<int>);
+REGISTER_OP_CPU_KERNEL(randperm, kernel<int64_t>, kernel<int>, kernel<float>,
+                       kernel<double>);
......
@@ -20,4 +20,5 @@ template <typename T>
 using kernel =
     paddle::operators::RandpermKernel<paddle::platform::CUDADeviceContext, T>;
-REGISTER_OP_CUDA_KERNEL(randperm, kernel<int64_t>, kernel<int>);
+REGISTER_OP_CUDA_KERNEL(randperm, kernel<int64_t>, kernel<int>, kernel<float>,
+                        kernel<double>);
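With float and double kernels now registered for both CPU and CUDA, non-integer dtypes become valid outputs; a minimal sketch of the new capability (again assuming imperative mode):

```python
import paddle

paddle.enable_imperative()

# float32/float64 are newly supported by the kernels registered above
perm = paddle.randperm(6, dtype="float64")
print(perm.numpy())  # e.g. [3. 0. 5. 2. 1. 4.]
```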
......
@@ -16,10 +16,8 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle
-import paddle.fluid as fluid
 import paddle.fluid.core as core
-from paddle.fluid.op import Operator
-from paddle.fluid import Program, program_guard
+from paddle import Program, program_guard
 
 
 def check_randperm_out(n, data_np):
......
@@ -36,8 +34,11 @@ def error_msg(data_np):
 def convert_dtype(dtype_str):
-    dtype_str_list = ["int32", "int64"]
-    dtype_num_list = [2, 3]
+    dtype_str_list = ["int32", "int64", "float32", "float64"]
+    dtype_num_list = [
+        core.VarDesc.VarType.INT32, core.VarDesc.VarType.INT64,
+        core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64
+    ]
     assert dtype_str in dtype_str_list, dtype_str + \
         " should in " + str(dtype_str_list)
     return dtype_num_list[dtype_str_list.index(dtype_str)]
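The helper now resolves dtype strings to `core.VarDesc.VarType` enum values, which is what the op's `dtype` attribute expects, rather than hard-coded ints. For instance, inside the test module above:

```python
import paddle.fluid.core as core

# convert_dtype, as defined above, maps strings to VarType enums
assert convert_dtype("int32") == core.VarDesc.VarType.INT32
assert convert_dtype("float64") == core.VarDesc.VarType.FP64
```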
......
@@ -50,8 +51,6 @@ class TestRandpermOp(OpTest):
         self.op_type = "randperm"
         self.n = 200
         self.dtype = "int64"
-        self.device = None
-        self.seed = 0
 
         self.inputs = {}
         self.outputs = {"Out": np.zeros((self.n)).astype(self.dtype)}
......
@@ -59,8 +58,6 @@ class TestRandpermOp(OpTest):
         self.attrs = {
             "n": self.n,
             "dtype": convert_dtype(self.dtype),
-            "device": self.device,
-            "seed": self.seed,
         }
 
     def init_attrs(self):
......
@@ -75,100 +72,60 @@
             check_randperm_out(self.n, out_np), msg=error_msg(out_np))
 
 
-class TestRandpermOp_attr_n(TestRandpermOp):
-    """ Test randperm op for attr n. """
-
+class TestRandpermOpN(TestRandpermOp):
     def init_attrs(self):
         self.n = 10000
 
 
-class TestRandpermOp_attr_int32(TestRandpermOp):
-    """ Test randperm op for attr int32 dtype. """
-
+class TestRandpermOpInt32(TestRandpermOp):
     def init_attrs(self):
         self.dtype = "int32"
 
 
-class TestRandpermOp_attr_device_cpu(TestRandpermOp):
-    """ Test randperm op for cpu device. """
-
+class TestRandpermOpFloat32(TestRandpermOp):
     def init_attrs(self):
-        self.device = "cpu"
+        self.dtype = "float32"
 
 
-class TestRandpermOp_attr_device_gpu(TestRandpermOp):
-    """ Test randperm op for gpu device. """
-
+class TestRandpermOpFloat64(TestRandpermOp):
     def init_attrs(self):
-        self.device = "gpu"
-
-
-class TestRandpermOp_attr_seed(TestRandpermOp):
-    """ Test randperm op for attr seed. """
-
-    def init_attrs(self):
-        self.seed = 10
+        self.dtype = "float64"
 
 
 class TestRandpermOpError(unittest.TestCase):
-    """ Test randperm op for raise error. """
-
     def test_errors(self):
-        main_prog = Program()
-        start_prog = Program()
-        with program_guard(main_prog, start_prog):
-
-            def test_Variable():
-                out = np.arange(10)
-                paddle.randperm(n=10, out=out)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_value():
-                paddle.randperm(n=-3)
-
-            self.assertRaises(ValueError, test_value)
-
-
-class TestRandpermOp_attr_out(unittest.TestCase):
-    """ Test randperm op for attr out. """
-
-    def test_attr_tensor_API(self):
-        startup_program = fluid.Program()
-        train_program = fluid.Program()
-        with fluid.program_guard(train_program, startup_program):
-            n = 10
-            data_1 = fluid.layers.fill_constant([n], "int64", 3)
-            paddle.randperm(n=n, out=data_1)
-            data_2 = paddle.randperm(n=n, dtype="int32", device="cpu")
-
-        place = fluid.CPUPlace()
-        if fluid.core.is_compiled_with_cuda():
-            place = fluid.CUDAPlace(0)
-        exe = fluid.Executor(place)
-        exe.run(startup_program)
-        outs = exe.run(train_program, fetch_list=[data_1, data_2])
-
-        out_np = np.array(outs[0])
-        self.assertTrue(
-            check_randperm_out(n, out_np), msg=error_msg(out_np))
+        with program_guard(Program(), Program()):
+            self.assertRaises(ValueError, paddle.randperm, -3)
+            self.assertRaises(TypeError, paddle.randperm, 10, 'int8')
 
 
-class TestRandpermDygraphMode(unittest.TestCase):
-    def test_check_output(self):
-        with fluid.dygraph.guard():
+class TestRandpermAPI(unittest.TestCase):
+    def test_out(self):
+        n = 10
+        place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else paddle.CPUPlace()
+        with program_guard(Program(), Program()):
+            x1 = paddle.randperm(n)
+            x2 = paddle.randperm(n, 'float32')
+
+            exe = paddle.Executor(place)
+            res = exe.run(fetch_list=[x1, x2])
+
+            self.assertEqual(res[0].dtype, np.int64)
+            self.assertEqual(res[1].dtype, np.float32)
+            self.assertTrue(check_randperm_out(n, res[0]))
+            self.assertTrue(check_randperm_out(n, res[1]))
 
 
+class TestRandpermImperative(unittest.TestCase):
+    def test_out(self):
+        with paddle.imperative.guard():
             n = 10
-            data_1 = paddle.randperm(n, dtype="int64")
-            data_1_np = data_1.numpy()
-            self.assertTrue(
-                check_randperm_out(n, data_1_np), msg=error_msg(data_1_np))
-
-            data_2 = paddle.randperm(n, dtype="int32", device="cpu")
-            data_2_np = data_2.numpy()
-            self.assertTrue(
-                check_randperm_out(n, data_2_np), msg=error_msg(data_2_np))
+            for dtype in ['int32', np.int64, 'float32', 'float64']:
+                data_p = paddle.randperm(n, dtype)
+                data_np = data_p.numpy()
+                self.assertTrue(
+                    check_randperm_out(n, data_np), msg=error_msg(data_np))
 
 
 if __name__ == "__main__":
......
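The error cases above reduce to two static-graph checks: `n` must be positive, and `dtype` must be one of the four supported types. A sketch of triggering both (using the `paddle.Program`/`program_guard` aliases this diff imports):

```python
import paddle
from paddle import Program, program_guard

with program_guard(Program(), Program()):
    try:
        paddle.randperm(-3)
    except ValueError as e:
        print(e)  # The input n should be greater than 0 in randperm op.

    try:
        paddle.randperm(10, 'int8')
    except TypeError as e:
        print(e)  # int8 is not among int32/int64/float32/float64
```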
......
@@ -317,12 +317,7 @@ def randn(shape,
 
 
 @templatedoc()
-def randperm(n,
-             out=None,
-             dtype="int64",
-             device=None,
-             stop_gradient=True,
-             seed=0):
+def randperm(n, dtype="int64", name=None):
     """
     :alias_main: paddle.randperm
     :alias: paddle.randperm,paddle.tensor.randperm,paddle.tensor.random.randperm
......
@@ -330,23 +325,13 @@ def randperm(n,
     ${comment}
 
     Args:
-        n (int): The upper bound (exclusive), and it should be greater than 0.
-        out (Variable, optional): Optional output which can be any created
-            Variable that meets the requirements to store the result of operation.
-            If out is None, a new Varibale will be create to store the result.
-            Default: None.
-        dtype (np.dtype|core.VarDesc.VarType|str, optional): The type of the
-            output Tensor. Supported data types: int64, int32. Default: int32.
-        device (str, optional): Specific the output variable to be saved in cpu
-            or gpu memory. Supported None, 'cpu', 'gpu'. If it is None, the output
-            variable will be automatically assigned devices.
-            Default: None.
-        stop_gradient (bool, optional): Whether grad should record operations
-            on the returned tensor. Default: True.
-        seed (int, optional): Random seed used for permute samples. If seed is
-            equal to 0, it means use a seed generated by the system. Note that
-            if seed is not 0, this operator will always generate the same random
-            permutation every time. Default: 0.
+        n(int): The upper bound (exclusive), and it should be greater than 0.
+        dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the
+            output Tensor. Supported data types: int32, int64, float32, float64.
+            Default: int64.
+        name(str, optional): Normally there is no need for user to set this property.
+            For more information, please refer to :ref:`api_guide_Name`.
+            Default is None.
 
     Returns:
         ${out_comment}.
......
@@ -358,51 +343,32 @@
     .. code-block:: python
 
         import paddle
-        import paddle.fluid as fluid
 
-        num = 6
-        is_use_gpu = False
+        paddle.enable_imperative()
 
-        data_1 = paddle.randperm(num)
-        fluid.layers.Print(data_1)
-
-        data_2 = paddle.randperm(num, dtype="int32", seed=1)
-        fluid.layers.Print(data_2)
-
-        data_3 = paddle.randperm(num, stop_gradient=False, device="cpu")
-        fluid.layers.Print(data_3)
-
-        paddle.randperm(num, out=data_3)
-        fluid.layers.Print(data_3)
+        result_1 = paddle.randperm(5)
+        # [4 1 2 3 0]
 
-        place = fluid.CUDAPlace(0) if is_use_gpu else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-        exe.run()
+        result_2 = paddle.randperm(7, 'int32')
+        # [1 6 2 0 4 3 5]
     """
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+    if in_dygraph_mode():
+        return core.ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
+
     if n < 1:
         raise ValueError("The input n should be greater than 0 in randperm op.")
-    check_dtype(dtype, 'dtype', ['int64', 'int32'], 'randperm')
-    dtype = convert_dtype(dtype)
-    if device not in [None, 'cpu', 'gpu']:
-        raise ValueError("The input device should in [None, 'cpu', 'gpu'].")
-    check_type(stop_gradient, 'stop_gradient', bool, 'randperm')
+    check_dtype(dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'],
+                'randperm')
 
     helper = LayerHelper("randperm", **locals())
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    else:
-        check_variable_and_dtype(out, 'out', [dtype], 'randperm')
-    if stop_gradient:
-        out.stop_gradient = True
-    inputs = dict()
-    outputs = {'Out': [out]}
-    attrs = {'n': n, 'dtype': out.dtype, 'seed': seed}
-    with device_guard(device):
-        helper.append_op(
-            type='randperm', inputs=inputs, outputs=outputs, attrs=attrs)
+    out = helper.create_variable_for_type_inference(dtype)
+    attrs = {'n': n, 'dtype': dtype, 'seed': 0}
+    helper.append_op(
+        type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs)
     return out
......
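As a quick sanity check of the final API, the result can be verified to be a permutation of `[0, n)` in the same spirit as the tests' `check_randperm_out`:

```python
import numpy as np
import paddle

paddle.enable_imperative()

n = 8
out = paddle.randperm(n).numpy()
# a valid random permutation sorts to exactly 0..n-1
assert np.array_equal(np.sort(out), np.arange(n))
```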