Unverified Commit 0f165f0b authored by F From00, committed by GitHub

Add yaml for randint OP (#41375)

Parent bcb663cc
......@@ -63,6 +63,34 @@ void RandpermInferMeta(int n, DataType dtype, MetaTensor* out) {
out->set_dtype(dtype);
}
void RandintInferMeta(
int low, int high, const IntArray& shape, DataType dtype, MetaTensor* out) {
PADDLE_ENFORCE_NOT_NULL(
out, errors::InvalidArgument("Output(Out) of RandintOp is null."));
PADDLE_ENFORCE_LT(
low,
high,
errors::InvalidArgument("randint's low must less then high, "
"but received: low = %d, high = %d.",
low,
high));
auto& shape_vector = shape.GetData();
PADDLE_ENFORCE_EQ(
shape_vector.empty(),
false,
errors::InvalidArgument("The shape information should not be empty, it "
"must be set by Attr(shape)."));
std::vector<int64_t> tensor_shape;
tensor_shape.reserve(shape_vector.size());
for (auto dim : shape_vector) {
tensor_shape.push_back(static_cast<int64_t>(dim));
}
out->set_dims(make_ddim(tensor_shape));
out->set_dtype(dtype);
}
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
......
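For context (not part of this commit), a minimal Python sketch of the behavior the new RandintInferMeta above encodes: the output tensor takes its shape from the shape argument and its dtype from the dtype argument, and low must be strictly less than high.

    import paddle

    # shape comes from the shape argument, dtype from the dtype argument
    x = paddle.randint(low=0, high=5, shape=[2, 3], dtype='int64')
    print(x.shape)   # [2, 3]
    print(x.dtype)   # paddle.int64

    # low >= high should trip the PADDLE_ENFORCE_LT check above
    try:
        paddle.randint(low=5, high=5, shape=[2, 3])
    except Exception as err:
        print(type(err).__name__)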
......@@ -55,6 +55,9 @@ void GaussianRandomInferMeta(const IntArray& shape,
void RandpermInferMeta(int n, DataType dtype, MetaTensor* out);
void RandintInferMeta(
int low, int high, const IntArray& shape, DataType dtype, MetaTensor* out);
void TruncatedGaussianRandomInferMeta(const std::vector<int>& shape,
float mean,
float std,
......
......@@ -14,13 +14,14 @@
from __future__ import print_function
import os
import paddle
import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
from paddle.static import program_guard, Program
paddle.enable_static()
......@@ -53,6 +54,10 @@ class TestRandintOp(OpTest):
np.allclose(
hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandintOpError(unittest.TestCase):
def test_errors(self):
......@@ -67,6 +72,10 @@ class TestRandintOpError(unittest.TestCase):
self.assertRaises(
TypeError, paddle.randint, 5, shape=[shape_tensor])
def test_errors_eager(self):
with _test_eager_guard():
self.test_errors()
class TestRandintOp_attr_tensorlist(OpTest):
def setUp(self):
......@@ -93,6 +102,10 @@ class TestRandintOp_attr_tensorlist(OpTest):
np.allclose(
hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
class TestRandint_attr_tensor(OpTest):
def setUp(self):
......@@ -114,6 +127,10 @@ class TestRandint_attr_tensor(OpTest):
np.allclose(
hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))
def test_check_output_eager(self):
with _test_eager_guard():
self.test_check_output()
# Test python API
class TestRandintAPI(unittest.TestCase):
......@@ -145,18 +162,30 @@ class TestRandintAPI(unittest.TestCase):
feed={'var_shape': np.array([100, 100]).astype('int64')},
fetch_list=[out1, out2, out3, out4, out5])
def test_api_eager(self):
with _test_eager_guard():
self.test_api()
class TestRandintImperative(unittest.TestCase):
def test_api(self):
paddle.disable_static()
self.run_test_case()
with _test_eager_guard():
self.run_test_case()
paddle.enable_static()
def run_test_case(self):
n = 10
x1 = paddle.randint(n, shape=[10], dtype="int32")
x2 = paddle.tensor.randint(n)
x3 = paddle.tensor.random.randint(n)
for i in [x1, x2, x3]:
for j in i.numpy().tolist():
self.assertTrue((j >= 0 and j < n))
class TestRandomValue(unittest.TestCase):
......@@ -174,6 +203,15 @@ class TestRandomValue(unittest.TestCase):
print("Test Fixed Random number on GPU------>")
paddle.disable_static()
self.run_test_case()
with _test_eager_guard():
self.run_test_case()
paddle.enable_static()
def run_test_case(self):
paddle.set_device('gpu')
paddle.seed(100)
......@@ -198,7 +236,6 @@ class TestRandomValue(unittest.TestCase):
self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect))
expect = [3581, 3420, -8027, -5237, -2436]
self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect))
if __name__ == "__main__":
......
......@@ -22,7 +22,7 @@ from ..fluid.layers import utils
import paddle
from paddle import _C_ops
from paddle.static import Variable
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph, _current_expected_place
__all__ = []
......@@ -687,7 +687,11 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
shape = utils.convert_shape_to_list(shape)
place = _current_expected_place()
return _C_ops.final_state_randint(low, high, shape, dtype, place)
if _in_legacy_dygraph():
shape = utils.convert_shape_to_list(shape)
return _C_ops.randint('shape', shape, 'low', low, 'high', high, 'seed',
0, 'dtype', dtype)
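For reference, a hedged Python sketch (not part of this diff) of how the two dygraph branches above are reached: under the eager guard paddle.randint dispatches to the new final_state_randint kernel with the current expected place, otherwise the legacy _C_ops.randint path is taken. The guard import mirrors the test file changed above.

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    paddle.disable_static()

    # legacy dygraph branch: dispatched to _C_ops.randint(...)
    x = paddle.randint(low=0, high=10, shape=[4], dtype='int32')
    print(x.numpy())

    # eager branch: dispatched to _C_ops.final_state_randint(low, high, shape, dtype, place)
    with _test_eager_guard():
        y = paddle.randint(low=0, high=10, shape=[4], dtype='int32')
        print(y.numpy())

    paddle.enable_static()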
......@@ -920,8 +924,7 @@ def randperm(n, dtype="int64", name=None):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
return _C_ops.final_state_randperm(n, dtype, _current_expected_place())
if _in_legacy_dygraph():
return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
......
......@@ -1265,6 +1265,18 @@
data_type : x
backward : put_along_axis_grad
- api : randint
args : (int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
output : Tensor(out)
infer_meta :
func : RandintInferMeta
param : [low, high, shape, dtype]
kernel :
func : randint
param : [low, high, shape, dtype]
data_type : dtype
backend : place
- api : randperm
args : (int n, DataType dtype, Place place={})
output : Tensor
......@@ -1276,7 +1288,7 @@
param : [n, dtype]
data_type : dtype
backend : place
- api : reciprocal
args : (Tensor x)
output : Tensor
......
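As a rough illustration of the two extra fields in the new randint entry (assuming the same code-generation rules the neighbouring randperm entry already uses): "data_type : dtype" means the dtype argument selects the kernel's data type, and "backend : place" means the kernel runs on the place the Python API fills in from the current device.

    import paddle

    paddle.disable_static()
    paddle.set_device('cpu')   # place={} falls back to the current expected place

    # data_type : dtype -- the dtype argument picks the kernel data type
    x32 = paddle.randint(0, 100, shape=[3], dtype='int32')
    x64 = paddle.randint(0, 100, shape=[3], dtype='int64')
    print(x32.dtype, x64.dtype)

    # backend : place -- the output is allocated on the chosen device
    print(x32.place)

    paddle.enable_static()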