From 0f165f0b34d9278620489c8323d57a23bfe58021 Mon Sep 17 00:00:00 2001
From: From00
Date: Mon, 4 Apr 2022 08:46:04 +0800
Subject: [PATCH] Add yaml for randint OP (#41375)

---
 paddle/phi/infermeta/nullary.cc               | 28 +++++++++++
 paddle/phi/infermeta/nullary.h                |  3 ++
 .../fluid/tests/unittests/test_randint_op.py  | 47 +++++++++++++++++--
 python/paddle/tensor/random.py                | 11 +++--
 python/paddle/utils/code_gen/api.yaml         | 14 +++++-
 5 files changed, 93 insertions(+), 10 deletions(-)

diff --git a/paddle/phi/infermeta/nullary.cc b/paddle/phi/infermeta/nullary.cc
index 6a05e1b4d7f..f76e7910d77 100644
--- a/paddle/phi/infermeta/nullary.cc
+++ b/paddle/phi/infermeta/nullary.cc
@@ -63,6 +63,34 @@ void RandpermInferMeta(int n, DataType dtype, MetaTensor* out) {
   out->set_dtype(dtype);
 }
 
+void RandintInferMeta(
+    int low, int high, const IntArray& shape, DataType dtype, MetaTensor* out) {
+  PADDLE_ENFORCE_NOT_NULL(
+      out, errors::InvalidArgument("Output(Out) of RandintOp is null."));
+  PADDLE_ENFORCE_LT(
+      low,
+      high,
+      errors::InvalidArgument("randint's low must less then high, "
+                              "but received: low = %d, high = %d.",
+                              low,
+                              high));
+
+  auto& shape_vector = shape.GetData();
+  PADDLE_ENFORCE_EQ(
+      shape_vector.empty(),
+      false,
+      errors::InvalidArgument("The shape information should not be empty, it "
+                              "must be set by Attr(shape)."));
+
+  std::vector<int64_t> tensor_shape;
+  tensor_shape.reserve(shape_vector.size());
+  for (auto dim : shape_vector) {
+    tensor_shape.push_back(static_cast<int64_t>(dim));
+  }
+  out->set_dims(make_ddim(tensor_shape));
+  out->set_dtype(dtype);
+}
+
 void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
                                       float mean,
                                       float std,
diff --git a/paddle/phi/infermeta/nullary.h b/paddle/phi/infermeta/nullary.h
index ada44658a2c..f84ac01d002 100644
--- a/paddle/phi/infermeta/nullary.h
+++ b/paddle/phi/infermeta/nullary.h
@@ -55,6 +55,9 @@ void GaussianRandomInferMeta(const IntArray& shape,
 
 void RandpermInferMeta(int n, DataType dtype, MetaTensor* out);
 
+void RandintInferMeta(
+    int low, int high, const IntArray& shape, DataType dtype, MetaTensor* out);
+
 void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
                                       float mean,
                                       float std,
diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py
index 5f58054d7ef..1eb99e08bb8 100644
--- a/python/paddle/fluid/tests/unittests/test_randint_op.py
+++ b/python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -14,13 +14,14 @@
 
 from __future__ import print_function
 
+import os
+import paddle
 import unittest
 import numpy as np
 from op_test import OpTest
-import paddle
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard
 from paddle.static import program_guard, Program
-import os
 
 paddle.enable_static()
 
@@ -53,6 +54,10 @@ class TestRandintOp(OpTest):
             np.allclose(
                 hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
 
+    def test_check_output_eager(self):
+        with _test_eager_guard():
+            self.test_check_output()
+
 
 class TestRandintOpError(unittest.TestCase):
     def test_errors(self):
@@ -67,6 +72,10 @@ class TestRandintOpError(unittest.TestCase):
             self.assertRaises(
                 TypeError, paddle.randint, 5, shape=[shape_tensor])
 
+    def test_errors_eager(self):
+        with _test_eager_guard():
+            self.test_errors()
+
 
 class TestRandintOp_attr_tensorlist(OpTest):
     def setUp(self):
@@ -93,6 +102,10 @@ class TestRandintOp_attr_tensorlist(OpTest):
             np.allclose(
                 hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
 
+    def test_check_output_eager(self):
+        with _test_eager_guard():
+            self.test_check_output()
+
 
 class TestRandint_attr_tensor(OpTest):
     def setUp(self):
@@ -114,6 +127,10 @@ class TestRandint_attr_tensor(OpTest):
             np.allclose(
                 hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
 
+    def test_check_output_eager(self):
+        with _test_eager_guard():
+            self.test_check_output()
+
 
 # Test python API
 class TestRandintAPI(unittest.TestCase):
@@ -145,18 +162,30 @@ class TestRandintAPI(unittest.TestCase):
                 feed={'var_shape': np.array([100, 100]).astype('int64')},
                 fetch_list=[out1, out2, out3, out4, out5])
 
+    def test_api_eager(self):
+        with _test_eager_guard():
+            self.test_api()
+
 
 class TestRandintImperative(unittest.TestCase):
     def test_api(self):
-        n = 10
         paddle.disable_static()
+
+        self.run_test_case()
+
+        with _test_eager_guard():
+            self.run_test_case()
+
+        paddle.enable_static()
+
+    def run_test_case(self):
+        n = 10
         x1 = paddle.randint(n, shape=[10], dtype="int32")
         x2 = paddle.tensor.randint(n)
        x3 = paddle.tensor.random.randint(n)
         for i in [x1, x2, x3]:
             for j in i.numpy().tolist():
                 self.assertTrue((j >= 0 and j < n))
-        paddle.enable_static()
 
 
 class TestRandomValue(unittest.TestCase):
@@ -174,6 +203,15 @@ class TestRandomValue(unittest.TestCase):
 
         print("Test Fixed Random number on GPU------>")
         paddle.disable_static()
+
+        self.run_test_case()
+
+        with _test_eager_guard():
+            self.run_test_case()
+
+        paddle.enable_static()
+
+    def run_test_case(self):
         paddle.set_device('gpu')
         paddle.seed(100)
 
@@ -198,7 +236,6 @@ class TestRandomValue(unittest.TestCase):
         self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect))
         expect = [3581, 3420, -8027, -5237, -2436]
         self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect))
-        paddle.enable_static()
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 20f4e73b271..d2e43634437 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -22,7 +22,7 @@ from ..fluid.layers import utils
 import paddle
 from paddle import _C_ops
 from paddle.static import Variable
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph, _current_expected_place
 
 __all__ = []
 
@@ -687,7 +687,11 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
 
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        shape = utils.convert_shape_to_list(shape)
+        place = _current_expected_place()
+        return _C_ops.final_state_randint(low, high, shape, dtype, place)
+    if _in_legacy_dygraph():
         shape = utils.convert_shape_to_list(shape)
         return _C_ops.randint('shape', shape, 'low', low, 'high', high,
                               'seed', 0, 'dtype', dtype)
@@ -920,8 +924,7 @@ def randperm(n, dtype="int64", name=None):
         dtype = convert_np_dtype_to_dtype_(dtype)
 
     if in_dygraph_mode():
-        return _C_ops.final_state_randperm(
-            n, dtype, paddle.fluid.framework._current_expected_place())
+        return _C_ops.final_state_randperm(n, dtype, _current_expected_place())
 
     if _in_legacy_dygraph():
         return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 507f8b3f360..fb0c6e294a0 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -1265,6 +1265,18 @@
     data_type : x
   backward : put_along_axis_grad
 
+- api : randint
+  args : (int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : RandintInferMeta
+    param : [low, high, shape, dtype]
+  kernel :
+    func : randint
+    param : [low, high, shape, dtype]
+    data_type : dtype
+    backend : place
+
 - api : randperm
   args : (int n, DataType dtype, Place place={})
   output : Tensor
@@ -1276,7 +1288,7 @@
     param : [n, dtype]
     data_type : dtype
     backend : place
- 
+
 - api : reciprocal
   args : (Tensor x)
   output : Tensor
-- 
GitLab
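
For context, a minimal usage sketch of the Python API that this patch wires up to the new eager-mode (final-state) kernel. It assumes a Paddle build that includes this change; the commented-out call at the end only illustrates the low < high check enforced by RandintInferMeta, it is not part of the patch.

import paddle

# In dynamic-graph (eager) mode, paddle.randint dispatches to
# _C_ops.final_state_randint, generated from the api.yaml entry above.
paddle.disable_static()

# 2x3 int32 tensor with values drawn uniformly from [0, 10).
x = paddle.randint(low=0, high=10, shape=[2, 3], dtype='int32')
print(x.numpy())

# RandintInferMeta requires low < high, so a call like the one below
# would raise an error:
# paddle.randint(low=5, high=5, shape=[2])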