#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cloud role maker."""

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.generator as generator
from paddle.tensor import random


class TestGeneratorSeed(unittest.TestCase):
    """
    Test cases for cpu generator seed.
    """

    def test_generator_uniform_random_dygraph(self):
        """Test Generator seed."""

        fluid.enable_dygraph()

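        # Reproducibility check on the default CPU generator: x is drawn right
        # after seeding, st1 snapshots the state before x1, restoring st1
        # should make x2 repeat x1, and re-seeding with the same value should
        # make x3 repeat x. The assertions only run on CPU-only builds.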
        gen = paddle.seed(12312321111)
        x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)

        st1 = gen.get_state()
        x1 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)

        gen.set_state(st1)
        print(gen.get_state())
        x2 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)

        paddle.seed(12312321111)
        x3 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)

        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()

        if not core.is_compiled_with_cuda():
            np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
            np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

    def test_generator_uniform_random_static(self):
        fluid.disable_dygraph()

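        # Static-graph counterpart: build a program with two uniform ops, run
        # it once, reset the seed with gen.manual_seed, and run it again. The
        # two runs must match element-wise, while the two ops within one run
        # must differ.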
        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            result_1 = paddle.uniform(shape=[3, 4])
            result_2 = paddle.uniform(shape=[3, 4])

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )
            # gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
                np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
                self.assertTrue(not np.allclose(out1_res2, out1_res1))

    def test_gen_dropout_dygraph(self):
        fluid.enable_dygraph()

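        # Dropout masks are driven by the seeded generator: re-seeding with the
        # same value before the second uniform/dropout pair should reproduce
        # the first pair's output on CPU-only builds.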
        gen = paddle.seed(111111111)
        st = gen.get_state()
        # x = np.arange(1,101).reshape(2,50).astype("float32")
        x = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
        y = paddle.nn.functional.dropout(x, 0.5)
        gen.manual_seed(111111111)
        # gen.set_state(st)
        x1 = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
        y1 = paddle.nn.functional.dropout(x1, 0.5)
        y_np = y.numpy()
        y1_np = y1.numpy()

        if not core.is_compiled_with_cuda():
            print(">>>>>>> dropout dygraph >>>>>>>")
            np.testing.assert_allclose(y_np, y1_np, rtol=1e-05)

    def test_gen_dropout_static(self):
        fluid.disable_dygraph()

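        # Same idea in static graph mode: running the uniform + dropout program
        # twice with the seed reset in between should yield identical outputs.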
        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            x_1 = paddle.uniform(shape=[2, 10])
            y_1 = paddle.nn.functional.dropout(x_1, 0.5)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(train_program, feed={}, fetch_list=[y_1])
            # gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(train_program, feed={}, fetch_list=[y_1])
        out1_np = np.array(out1[0])
        out2_np = np.array(out2[0])

        if not core.is_compiled_with_cuda():
            print(">>>>>>> dropout static >>>>>>>")
            np.testing.assert_allclose(out1_np, out2_np, rtol=1e-05)

    def test_generator_gaussian_random_dygraph(self):
        """Test Generator seed."""
        fluid.enable_dygraph()

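        # Same snapshot/restore and re-seed pattern as the uniform dygraph
        # test, but for gaussian draws from paddle.tensor.random.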
        gen = paddle.seed(12312321111)
        x = random.gaussian([10], dtype="float32")
        st1 = gen.get_state()
        x1 = random.gaussian([10], dtype="float32")
        gen.set_state(st1)
        x2 = random.gaussian([10], dtype="float32")
        gen.manual_seed(12312321111)
        x3 = random.gaussian([10], dtype="float32")
        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()

        if not core.is_compiled_with_cuda():
            print(">>>>>>> gaussian random dygraph >>>>>>>")
            np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
            np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

    def test_generator_gaussian_random_static(self):
        fluid.disable_dygraph()

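        # Static-graph gaussian version: two seeded runs of the same program
        # must agree, and the two gaussian ops within a run must differ.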
        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            result_1 = random.gaussian(shape=[3, 4])
            result_2 = random.gaussian(shape=[3, 4])

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )
            # gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                print(">>>>>>> gaussian random static >>>>>>>")
                np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
                np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
                self.assertTrue(not np.allclose(out1_res2, out1_res1))

    def test_generator_randint_dygraph(self):
        """Test Generator seed."""
        gen = generator.Generator()

        fluid.enable_dygraph()

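        # The generator.Generator() instance created above is immediately
        # replaced by the seeded global generator; the test then checks that
        # set_state and re-seeding reproduce randint draws on CPU-only builds.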
        gen = paddle.seed(12312321111)
        x = paddle.randint(low=10, shape=[10], dtype="int32")
        st1 = gen.get_state()
        x1 = paddle.randint(low=10, shape=[10], dtype="int32")
        gen.set_state(st1)
        x2 = paddle.randint(low=10, shape=[10], dtype="int32")
        gen.manual_seed(12312321111)
        x3 = paddle.randint(low=10, shape=[10], dtype="int32")
        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()

        if not core.is_compiled_with_cuda():
            print(">>>>>>> randint dygraph >>>>>>>")
            np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
            np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

    def test_generator_uniform_random_static_1(self):
        fluid.disable_dygraph()

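        # A second copy of the static uniform reproducibility check, kept as a
        # separate test case.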
        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            result_1 = paddle.uniform(shape=[3, 4])
            result_2 = paddle.uniform(shape=[3, 4])

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )
            # gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
                np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
                self.assertTrue(not np.allclose(out1_res2, out1_res1))

    def test_generator_randint_dygraph_1(self):
        """Test Generator seed."""
        fluid.enable_dygraph()

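        # Same randint reproducibility check, but calling paddle.randint with
        # only `low`, so the default output shape is used.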
        gen = paddle.seed(12312321111)
        x = paddle.randint(low=1)
        st1 = gen.get_state()
        x1 = paddle.randint(low=1)
        gen.set_state(st1)
        x2 = paddle.randint(low=1)
        gen.manual_seed(12312321111)
        x3 = paddle.randint(low=1)
        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()
        if not core.is_compiled_with_cuda():
            np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
            np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

    def test_generator_randint_static(self):
        fluid.disable_dygraph()

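        # Static-graph randint version: two seeded runs of the same program
        # must match, while the two randint ops within a run must differ.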
        gen = paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            result_1 = paddle.randint(low=10, shape=[3, 4])
            result_2 = paddle.randint(low=10, shape=[3, 4])

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )
            # gen.set_state(cur_state)
            gen.manual_seed(123123143)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                print(">>>>>>> randint static >>>>>>>")
                np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
                np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
                self.assertTrue(not np.allclose(out1_res2, out1_res1))

    def test_generator_randperm_dygraph(self):
        """Test Generator seed."""

        fluid.enable_dygraph()

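        # randperm should also be reproducible: restoring the saved state or
        # re-seeding must regenerate the same permutation.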
        gen = paddle.seed(12312321111)
        x = paddle.randperm(10)
        st1 = gen.get_state()
        x1 = paddle.randperm(10)
        gen.set_state(st1)
        x2 = paddle.randperm(10)
        gen.manual_seed(12312321111)
        x3 = paddle.randperm(10)
        x_np = x.numpy()
        x1_np = x1.numpy()
        x2_np = x2.numpy()
        x3_np = x3.numpy()

        if not core.is_compiled_with_cuda():
            print(">>>>>>> randperm dygraph >>>>>>>")
            np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05)
            np.testing.assert_allclose(x_np, x3_np, rtol=1e-05)

    def test_generator_randperm_static(self):

        fluid.disable_dygraph()

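        # Static-graph randperm version: re-seeding between two runs of the
        # same program should reproduce both permutations.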
        paddle.seed(123123143)

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            result_1 = paddle.randperm(10)
            result_2 = paddle.randperm(10)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            paddle.seed(123123143)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

            out1_res1 = np.array(out1[0])
            out1_res2 = np.array(out1[1])
            out2_res1 = np.array(out2[0])
            out2_res2 = np.array(out2[1])

            if not core.is_compiled_with_cuda():
                print(">>>>>>> randperm static >>>>>>>")
                np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
                np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
                self.assertTrue(not np.allclose(out1_res2, out1_res1))

    def test_gen_TruncatedNormal_initializer(self):
        fluid.disable_dygraph()

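        # The TruncatedNormal initializer also consumes the seeded generator:
        # re-seeding and re-running the startup program should re-initialize
        # both fc weights to the same values as in the first run.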
        gen = paddle.seed(123123143)
        cur_state = gen.get_state()

        startup_program = fluid.Program()
        train_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # example 1:
            # attr shape is a list that doesn't contain a Tensor Variable.
            x = paddle.uniform(shape=[2, 10])
            result_1 = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=2.0
                ),
            )
            result_2 = fluid.layers.fc(
                input=x,
                size=10,
                param_attr=fluid.initializer.TruncatedNormal(
                    loc=0.0, scale=2.0
                ),
            )

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(startup_program)
            out1 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

        gen.manual_seed(123123143)
        with fluid.program_guard(train_program, startup_program):
            exe.run(startup_program)
            out2 = exe.run(
                train_program, feed={}, fetch_list=[result_1, result_2]
            )

        out1_res1 = np.array(out1[0])
        out1_res2 = np.array(out1[1])
        out2_res1 = np.array(out2[0])
        out2_res2 = np.array(out2[1])

        if not core.is_compiled_with_cuda():
            print(">>>>>>> truncated normal initializer static >>>>>>>")
            np.testing.assert_allclose(out1_res1, out2_res1, rtol=1e-05)
            np.testing.assert_allclose(out1_res2, out2_res2, rtol=1e-05)
            self.assertTrue(not np.allclose(out1_res2, out1_res1))


if __name__ == "__main__":
    unittest.main()