#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
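
# The cases below cover the float64 and float16 kernels, scalar / vector /
# broadcast input shapes, and TypeError checks for invalid inputs.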


class TestElementwiseAddOp(OpTest):
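    # Subclasses vary the case by overriding the init_* hooks below
    # (dtype, input shapes and expected output, broadcast axis, kernel type).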
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ignore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ignore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=(self.use_mkldnn == False))

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


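# Re-runs the whole suite above with float16 inputs; requires a CUDA build.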
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
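        # FP16 results are only checked on a CUDA place that supports
        # float16, and with a looser absolute tolerance (atol=1e-3).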
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=(self.use_mkldnn == False))


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


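# In the broadcast cases below, the 'axis' attribute is the dimension of x
# that y's first dimension is aligned with; axis=-1 (the default) aligns y
# with x's trailing dimensions.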
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
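    # Here x has fewer dimensions than y; axis=2 aligns x with y's
    # dimensions 2 and 3 before broadcasting.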
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


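# Invalid inputs: both non-Variable inputs and unsupported dtypes are
# expected to raise TypeError.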
class TestElementwiseAddOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the inputs of elementwise_add must be Variables.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16, float32, float64, int32 or int64
            # float16 can only be used on CUDA (GPU) places
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


if __name__ == '__main__':
    unittest.main()