# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard, core

SEED = 2020


def fc_refer(matrix, with_bias, with_relu=False):
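    """NumPy reference for the fc op: flatten the 4-D input to 2-D, multiply
    it by the weight matrix, then optionally add the bias and apply ReLU."""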
    in_n, in_c, in_h, in_w = matrix.input.shape
    w_i, w_o = matrix.weights.shape

    x_data = np.reshape(matrix.input, [in_n, in_c * in_h * in_w])
    w_data = np.reshape(matrix.weights, [w_i, w_o])
    b_data = np.reshape(matrix.bias, [1, w_o])
    result = None

    if with_bias:
        result = np.dot(x_data, w_data) + b_data
    else:
        result = np.dot(x_data, w_data)

    if with_relu:
        return np.maximum(result, 0)
    else:
        return result


class MatrixGenerate:
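    """Random input, weight, and bias tensors for one fc test case;
    bias_dims selects between a 2-D (1, oc) and a 1-D (oc,) bias."""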
    def __init__(self, mb, ic, oc, h, w, bias_dims=2):
        self.input = np.random.random((mb, ic, h, w)).astype("float32")
        self.weights = np.random.random((ic * h * w, oc)).astype("float32")
        if bias_dims == 2:
            self.bias = np.random.random((1, oc)).astype("float32")
        else:
            self.bias = np.random.random((oc)).astype("float32")


class TestFCOp(OpTest):
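    """Base OpTest case for the fc operator; subclasses override config()
    to vary the tensor shapes, the bias, and the ReLU fusion."""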
    def config(self):
        self.with_bias = True
        self.with_relu = True
        self.matrix = MatrixGenerate(1, 10, 15, 3, 3, 2)

    def setUp(self):
        self.op_type = "fc"
        self.config()

        if self.with_bias:
            self.inputs = {
                'Input': self.matrix.input,
                'W': self.matrix.weights,
                'Bias': self.matrix.bias
            }
        else:
            self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights}

        if self.with_relu:
            activation_type = "relu"
        else:
            activation_type = ""
        self.attrs = {'use_mkldnn': False, 'activation_type': activation_type}

        self.outputs = {
            'Out': fc_refer(self.matrix, self.with_bias, self.with_relu)
        }

    def test_check_output(self):
        self.check_output()


class TestFCOpNoBias1(TestFCOp):
    def config(self):
        self.with_bias = False
        self.with_relu = False
        self.matrix = MatrixGenerate(2, 8, 10, 1, 1, 2)


class TestFCOpNoBias2(TestFCOp):
    def config(self):
        self.with_bias = False
        self.with_relu = False
        self.matrix = MatrixGenerate(4, 5, 6, 2, 2, 1)


class TestFCOpNoBias4(TestFCOp):
    def config(self):
        self.with_bias = False
        self.with_relu = False
        self.matrix = MatrixGenerate(1, 32, 64, 3, 3, 1)


class TestFCOpWithBias1(TestFCOp):
    def config(self):
        self.with_bias = True
        self.with_relu = False
        self.matrix = MatrixGenerate(3, 8, 10, 2, 1, 2)


class TestFCOpWithBias2(TestFCOp):
    def config(self):
        self.with_bias = True
        self.with_relu = True
        self.matrix = MatrixGenerate(4, 5, 6, 2, 2, 1)


class TestFCOpWithBias3(TestFCOp):
    def config(self):
        self.with_bias = True
        self.with_relu = True
        self.matrix = MatrixGenerate(1, 64, 32, 3, 3, 1)


class TestFCOpWithPadding(TestFCOp):
    def config(self):
        self.with_bias = True
        self.with_relu = True
        self.matrix = MatrixGenerate(1, 4, 3, 128, 128, 2)


class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
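    """num_flatten_dims=-1 is expected to behave like rank(x) - 1, so for
    this 3-D input it should give the same result as num_flatten_dims=2."""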
    def test_api(self):
        def run_program(num_flatten_dims):
            # Seed both Paddle and NumPy so the two runs use identical
            # parameter initialization and input data.
            paddle.seed(SEED)
            np.random.seed(SEED)
            startup_program = Program()
            main_program = Program()

            with program_guard(main_program, startup_program):
                input = np.random.random([2, 2, 25]).astype("float32")
                x = fluid.layers.data(
                    name="x",
                    shape=[2, 2, 25],
                    append_batch_size=False,
                    dtype="float32")

                out = paddle.static.nn.fc(x=x,
                                          size=1,
                                          num_flatten_dims=num_flatten_dims)

            place = (fluid.CUDAPlace(0)
                     if core.is_compiled_with_cuda() else fluid.CPUPlace())
            exe = fluid.Executor(place=place)
            exe.run(startup_program)
            out = exe.run(main_program, feed={"x": input}, fetch_list=[out])
            # Return the fetched result so the two runs can be compared below.
            return out

        res_1 = run_program(-1)
        res_2 = run_program(2)
        self.assertTrue(np.array_equal(res_1, res_2))


class TestFCOpError(unittest.TestCase):
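    """Invalid inputs to fluid.layers.fc (a raw numpy array, a list of numpy
    arrays, or an int32 Variable) must raise TypeError; float16 input is
    exercised only for the GPU warning path."""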
    def test_errors(self):
        with program_guard(Program(), Program()):
            input_data = np.random.random((2, 4)).astype("float32")

            def test_Variable():
                # the input type must be Variable
                fluid.layers.fc(input=input_data, size=1)

            self.assertRaises(TypeError, test_Variable)

            def test_input_list():
                # each of input(list) must be Variable
                fluid.layers.fc(input=[input_data], size=1)

            self.assertRaises(TypeError, test_input_list)

            def test_type():
                # dtype must be float32 or float64
                x2 = fluid.layers.data(name='x2', shape=[4], dtype='int32')
                fluid.layers.fc(input=x2, size=1)

            self.assertRaises(TypeError, test_type)

            # The input dtype of fc can be float16 in GPU, test for warning
            x3 = fluid.layers.data(name='x3', shape=[4], dtype='float16')
            fluid.layers.fc(input=x3, size=1)


if __name__ == "__main__":
    unittest.main()