#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np

import paddle.fluid.core as core
from paddle import rand
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle


class TestRandOpError(unittest.TestCase):
    """
    This class tests the input type check of the rand op.
    """

    def test_errors(self):
        main_prog = Program()
        start_prog = Program()
        with program_guard(main_prog, start_prog):

            def test_Variable():
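                # passing a LoDTensor as the shape argument should raise TypeError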
                x1 = fluid.create_lod_tensor(np.zeros((4, 784)), [[1, 1, 1, 1]],
                                             fluid.CPUPlace())
                rand(x1)

            self.assertRaises(TypeError, test_Variable)

            def test_dtype():
                dim_1 = fluid.layers.fill_constant([1], "int64", 3)
                dim_2 = fluid.layers.fill_constant([1], "int32", 5)
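                # an integer output dtype is not supported and should raise TypeError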
                rand(shape=[dim_1, dim_2], dtype='int32')

            self.assertRaises(TypeError, test_dtype)


class TestRandOp(unittest.TestCase):
    """
    This class tests the common usages of the rand op.
    """

    def run_net(self, use_cuda=False):
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)

        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
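            # shape given as a plain Python list, with the default and an explicit dtype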
            result_0 = rand([3, 4])
            result_1 = rand([3, 4], 'float64')

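            # shape given as a list of 1-D integer Tensors (mixing int64 and int32)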
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            result_2 = rand(shape=[dim_1, dim_2])

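            # shape given as a 1-D int64 Tensor that is fed at run time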
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = rand(var_shape)

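            # shape given as a 1-D int32 Tensor that is fed at run time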
            var_shape_int32 = fluid.data(name='var_shape_int32',
                                         shape=[2],
                                         dtype="int32")
            result_4 = rand(var_shape_int32)

        exe.run(startup_program)

        x1 = np.array([3, 2]).astype('int64')
        x2 = np.array([4, 3]).astype('int32')
        ret = exe.run(
            train_program,
            feed={
                "var_shape": x1,
                "var_shape_int32": x2
            },
            fetch_list=[result_0, result_1, result_2, result_3, result_4])

    def test_run(self):
        self.run_net(False)
        if core.is_compiled_with_cuda():
            self.run_net(True)


class TestRandOpForDygraph(unittest.TestCase):
    """
    This class tests the common usages of the rand op in dygraph mode.
    """

    def run_net(self, use_cuda=False):
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        with fluid.dygraph.guard(place):
            rand([3, 4])

            rand([3, 4], 'float64')

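            # shape given as a list of 1-D integer Tensors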
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            rand(shape=[dim_1, dim_2])

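            # shape given as a Tensor created from a numpy array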
            var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
            rand(var_shape)

    def test_run(self):
        self.run_net(False)
        if core.is_compiled_with_cuda():
            self.run_net(True)


class TestRandDtype(unittest.TestCase):
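    """
    This class tests the default dtype behavior of the rand op.
    """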

    def test_default_dtype(self):
        paddle.disable_static()
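        # rand should raise TypeError for a float16 default dtype and honor float32/float64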

        def test_default_fp16():
            paddle.framework.set_default_dtype('float16')
            paddle.tensor.random.rand([2, 3])

        self.assertRaises(TypeError, test_default_fp16)

        def test_default_fp32():
            paddle.framework.set_default_dtype('float32')
            out = paddle.tensor.random.rand([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32)

        def test_default_fp64():
            paddle.framework.set_default_dtype('float64')
            out = paddle.tensor.random.rand([2, 3])
            self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)

        test_default_fp64()
        test_default_fp32()

        paddle.enable_static()


if __name__ == "__main__":
    unittest.main()