# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import paddle
import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
from paddle.static import program_guard, Program

paddle.enable_static()


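# Helper: bucket samples into 10 bins over [-10, 10) and normalize to
# frequencies; a uniform randint over that range gives about 0.1 per bin.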
def output_hist(out):
    hist, _ = np.histogram(out, range=(-10, 10))
    hist = hist.astype("float32")
    hist /= float(out.size)
    prob = 0.1 * np.ones((10))
    return hist, prob


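# Check the "randint" op with the shape passed as an attribute: the output
# drawn from [-10, 10) should be approximately uniformly distributed.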
class TestRandintOp(OpTest):
    def setUp(self):
        self.op_type = "randint"
        self.inputs = {}
        self.init_attrs()
        self.outputs = {"Out": np.zeros((10000, 784)).astype("float32")}

    def init_attrs(self):
        self.attrs = {"shape": [10000, 784], "low": -10, "high": 10, "seed": 10}
        self.output_hist = output_hist

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        hist, prob = self.output_hist(np.array(outs[0]))
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))

    def test_check_output_eager(self):
        with _test_eager_guard():
            self.test_check_output()


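# Invalid arguments must raise: unsupported shape/dtype types and an empty
# sampling range (high <= low).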
class TestRandintOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            self.assertRaises(TypeError, paddle.randint, 5, shape=np.array([2]))
            self.assertRaises(TypeError, paddle.randint, 5, dtype='float32')
            self.assertRaises(ValueError, paddle.randint, 5, 5)
            self.assertRaises(ValueError, paddle.randint, -5)
            self.assertRaises(TypeError, paddle.randint, 5, shape=['2'])
            shape_tensor = paddle.static.data('X', [1])
            self.assertRaises(TypeError, paddle.randint, 5, shape=shape_tensor)
            self.assertRaises(
                TypeError, paddle.randint, 5, shape=[shape_tensor])

    def test_errors_eager(self):
        with _test_eager_guard():
            self.test_errors()


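# Same uniformity check, but the shape is fed through the ShapeTensorList
# input (one 1-D int64 tensor per dimension).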
class TestRandintOp_attr_tensorlist(OpTest):
    def setUp(self):
        self.op_type = "randint"
        self.new_shape = (10000, 784)
        shape_tensor = []
        for index, ele in enumerate(self.new_shape):
            shape_tensor.append(("x" + str(index), np.ones(
                (1)).astype("int64") * ele))
        self.inputs = {'ShapeTensorList': shape_tensor}
        self.init_attrs()
        self.outputs = {"Out": np.zeros((10000, 784)).astype("int32")}

    def init_attrs(self):
        self.attrs = {"low": -10, "high": 10, "seed": 10}
        self.output_hist = output_hist

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        hist, prob = self.output_hist(np.array(outs[0]))
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))

    def test_check_output_eager(self):
        with _test_eager_guard():
            self.test_check_output()


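# Same uniformity check, with the whole shape fed through a single
# ShapeTensor input.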
class TestRandint_attr_tensor(OpTest):
    def setUp(self):
        self.op_type = "randint"
        self.inputs = {"ShapeTensor": np.array([10000, 784]).astype("int64")}
        self.init_attrs()
        self.outputs = {"Out": np.zeros((10000, 784)).astype("int64")}

    def init_attrs(self):
        self.attrs = {"low": -10, "high": 10, "seed": 10}
        self.output_hist = output_hist

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def verify_output(self, outs):
        hist, prob = self.output_hist(np.array(outs[0]))
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.001), "hist: " + str(hist))

    def test_check_output_eager(self):
        with _test_eager_guard():
            self.test_check_output()


# Test the Python API paddle.randint
class TestRandintAPI(unittest.TestCase):
    def test_api(self):
        with program_guard(Program(), Program()):
            # results are from [0, 5).
            out1 = paddle.randint(5)
            # shape is a list and dtype is 'int32'
            out2 = paddle.randint(
                low=-100, high=100, shape=[64, 64], dtype='int32')
            # shape is a tuple and dtype is 'int64'
            out3 = paddle.randint(
                low=-100, high=100, shape=(32, 32, 3), dtype='int64')
            # shape is a tensorlist and dtype is 'int32'
            dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 32)
            dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
            out4 = paddle.randint(
                low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32')
            # shape is a tensor and dtype is 'int64'
            var_shape = paddle.static.data(
                name='var_shape', shape=[2], dtype="int64")
            out5 = paddle.randint(
                low=1, high=1000, shape=var_shape, dtype='int64')

            place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
            ) else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            outs = exe.run(
                feed={'var_shape': np.array([100, 100]).astype('int64')},
                fetch_list=[out1, out2, out3, out4, out5])

    def test_api_eager(self):
        with _test_eager_guard():
            self.test_api()


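# Dynamic-graph (imperative) API check: every value returned by the three
# randint entry points must lie in [0, n).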
class TestRandintImperative(unittest.TestCase):
    def test_api(self):
        paddle.disable_static()

        self.run_test_case()

        with _test_eager_guard():
            self.run_test_case()

        paddle.enable_static()

    def run_test_case(self):
        n = 10
        x1 = paddle.randint(n, shape=[10], dtype="int32")
        x2 = paddle.tensor.randint(n)
        x3 = paddle.tensor.random.randint(n)
        for i in [x1, x2, x3]:
            for j in i.numpy().tolist():
                self.assertTrue(0 <= j < n)


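# Regression test: with a fixed seed on a V100 GPU, paddle.randint should
# reproduce the statistics and sample values recorded below.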
class TestRandomValue(unittest.TestCase):
    def test_fixed_random_number(self):
        # Test GPU fixed random numbers, which are generated by 'curandStatePhilox4_32_10_t'
        if not paddle.is_compiled_with_cuda():
            return

        # Different GPUs generate different random values. Only test on V100 here.
        if "V100" not in paddle.device.cuda.get_device_name():
            return

        print("Test Fixed Random number on GPU------>")
        paddle.disable_static()

        self.run_test_case()

        with _test_eager_guard():
            self.run_test_case()

        paddle.enable_static()

    def run_test_case(self):
        paddle.set_device('gpu')
        paddle.seed(100)

        x = paddle.randint(
            -10000, 10000, [32, 3, 1024, 1024], dtype='int32').numpy()
        self.assertTrue(np.allclose(x.mean(), -0.7517569760481516))
        self.assertTrue(np.allclose(x.std(), 5773.696619107639))
        expect = [2535, 2109, 5916, -5011, -261]
        self.assertTrue(np.array_equal(x[10, 0, 100, 100:105], expect))
        expect = [3465, 7206, -8660, -9628, -6574]
        self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect))
        expect = [881, 1560, 1100, 9664, 1669]
        self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect))

        x = paddle.randint(
            -10000, 10000, [32, 3, 1024, 1024], dtype='int64').numpy()
        self.assertTrue(np.allclose(x.mean(), -1.461287518342336))
        self.assertTrue(np.allclose(x.std(), 5773.023477548159))
        expect = [7213, -9597, 754, 8129, -1158]
        self.assertTrue(np.array_equal(x[10, 0, 100, 100:105], expect))
        expect = [-7159, 8054, 7675, 6980, 8506]
        self.assertTrue(np.array_equal(x[20, 1, 600, 600:605], expect))
        expect = [3581, 3420, -8027, -5237, -2436]
        self.assertTrue(np.array_equal(x[30, 2, 1000, 1000:1005], expect))


if __name__ == "__main__":
    unittest.main()