#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


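# Checks the mul op's forward output against np.dot on 2-D inputs and
# verifies the X and Y gradients with OpTest's numeric gradient checker.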
class TestMulOp(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {
            'X': np.random.random((20, 5)).astype(self.dtype),
            'Y': np.random.random((5, 21)).astype(self.dtype)
        }
        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

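    # no_grad_set excludes an input from the gradient check, so only the
    # other input's gradient is verified; the loose max_relative_error
    # allows for noise in the finite-difference reference gradient.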
    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


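# Verifies that mul rejects non-Variable inputs and unsupported integer
# dtypes by raising TypeError.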
class TestMulOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of mul_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.mul, x1, x2)
            # The input dtype of mul_op must be float32 or float64.
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
            x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.mul, x3, x4)


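# Exercises mul on high-rank inputs. With x_num_col_dims=2 and
# y_num_col_dims=2, X of shape (3, 4, 2, 9) is flattened to (12, 18) and
# Y of shape (3, 6, 1, 2, 3) to (18, 6); the (12, 6) matmul result is then
# reshaped back to (3, 4, 1, 2, 3).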
class TestMulOp2(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float64
        self.init_dtype_type()
        self.inputs = {
            'X': np.random.random((3, 4, 2, 9)).astype(self.dtype),
            'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype)
        }
        self.attrs = {
            'x_num_col_dims': 2,
            'y_num_col_dims': 2,
        }
        result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9),
                        self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3))
        result = result.reshape(3, 4, 1, 2, 3)
        self.outputs = {'Out': result}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


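# The FP16 variants rerun the cases above on a CUDA device with relaxed
# tolerances; they are skipped when CUDA or float16 support is unavailable.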
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16MulOp1(TestMulOp):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-1)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Y'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set('Y'))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16MulOp2(TestMulOp2):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=2e-1)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X', 'Y'], 'Out', max_relative_error=0.9)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Y'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X'],
                'Out',
                max_relative_error=0.9,
                no_grad_set=set('Y'))


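# The XPU variants rerun the cases above in float32 on an XPU device and
# are skipped when Paddle is not compiled with XPU support.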
@unittest.skipIf(not core.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUMulOp1(TestMulOp):
    def init_dtype_type(self):
        self.dtype = np.float32

    def test_check_output(self):
        place = core.XPUPlace(0)
        self.check_output_with_place(place, atol=1e-1)

    def test_check_grad_normal(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ignore_x(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


@unittest.skipIf(not core.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestXPUMulOp2(TestMulOp2):
    def init_dtype_type(self):
        self.dtype = np.float32

    def test_check_output(self):
        place = core.XPUPlace(0)
        self.check_output_with_place(place, atol=2e-1)

    def test_check_grad_normal(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out', max_relative_error=0.9)

    def test_check_grad_ignore_x(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=0.9, no_grad_set=set('Y'))


if __name__ == "__main__":
    unittest.main()