#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard


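# Basic 2-D case of the mul op: Out is the plain matrix product of X and Y.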
class TestMulOp(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float32
        self.init_dtype_type()
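        # X is (2, 5) and Y is (5, 3), so Out is a (2, 3) matrix.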
        self.inputs = {
            'X': np.random.random((2, 5)).astype(self.dtype),
            'Y': np.random.random((5, 3)).astype(self.dtype)
        }
        self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}

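    # Hook for subclasses (e.g. the FP16 variants below) to swap the dtype.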
    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


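# Verifies that mul rejects non-Variable inputs and unsupported dtypes.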
class TestMulOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of mul_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.mul, x1, x2)
            # The input dtype of mul_op must be float32 or float64.
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
            x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.mul, x3, x4)


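# mul with higher-rank inputs: x_num_col_dims/y_num_col_dims control how
# each tensor is flattened to a 2-D matrix before multiplication.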
class TestMulOp2(OpTest):
    def setUp(self):
        self.op_type = "mul"
        self.dtype = np.float32
        self.init_dtype_type()
        self.inputs = {
            'X': np.random.random((3, 4, 4, 3)).astype(self.dtype),
            'Y': np.random.random((2, 6, 1, 2, 3)).astype(self.dtype)
        }
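        # With num_col_dims=2, X (3, 4, 4, 3) flattens to (3*4, 4*3) and
        # Y (2, 6, 1, 2, 3) to (2*6, 1*2*3); the product is reshaped back
        # to X's leading dims plus Y's trailing dims, i.e. (3, 4, 1, 2, 3).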
        self.attrs = {
            'x_num_col_dims': 2,
            'y_num_col_dims': 2,
        }
        result = np.dot(self.inputs['X'].reshape(3 * 4, 4 * 3),
                        self.inputs['Y'].reshape(2 * 6, 1 * 2 * 3))
        result = result.reshape(3, 4, 1, 2, 3)
        self.outputs = {'Out': result}

    def init_dtype_type(self):
        pass

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16MulOp1(TestMulOp):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-1)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X', 'Y'], 'Out', max_relative_error=0.5)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Y'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set('Y'))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16MulOp2(TestMulOp2):
    def init_dtype_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=2e-1)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X', 'Y'], 'Out', max_relative_error=0.9)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Y'],
                'Out',
                max_relative_error=0.5,
                no_grad_set=set("X"))

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['X'],
                'Out',
                max_relative_error=0.9,
                no_grad_set=set('Y'))


if __name__ == "__main__":
    unittest.main()