#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestElementwiseAddOp(OpTest):
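    """Base test for the elementwise_add op.

    The default case adds two float64 tensors of shape [13, 17]. Subclasses
    override the init_* hooks below to change dtype, input shapes, broadcast
    axis, or kernel type (e.g. MKL-DNN).
    """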
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=(self.use_mkldnn == False))

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=(self.use_mkldnn == False))

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
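    """FP16 variant of the base tests.

    Gradient checks are skipped for float16 (see the base class), and the
    forward output is only verified on a CUDA place that supports FP16,
    with a relaxed tolerance.
    """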
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=(self.use_mkldnn == False))


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
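    # y has shape (100,) and is added along axis 0 of x (100, 2, 3); the
    # reference output reshapes y to (100, 1, 1) before adding.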
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
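    # With axis=1, y (10, 12) is aligned with dimensions 1 and 2 of
    # x (2, 10, 12, 3), matching y.reshape(1, 10, 12, 1) in the reference.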
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
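    # Row-wise add: y (10, 12) matches the trailing dimensions of
    # x (2, 10, 12) starting at axis=1, i.e. y.reshape(1, 10, 12).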
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
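    # y (100, 1, 1) has the same rank as x (100, 2, 3) with size-1 trailing
    # dimensions, so it broadcasts directly under the default axis=-1.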
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
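    # Both inputs contain size-1 dimensions ((10, 3, 1, 4) and (10, 1, 12, 1)),
    # so broadcasting is applied to x and y at the same time.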
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
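    # Here x has fewer dimensions than y; axis=2 aligns x (10, 12) with
    # dimensions 2 and 3 of y (2, 3, 10, 12).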
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOpError(unittest.TestCase):
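    """Checks that elementwise_add rejects non-Variable inputs and
    unsupported input dtypes (uint8)."""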
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the inputs of elementwise_add must be Variables.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16, float32, float64, int32 or int64
            # float16 can only be used on a GPU place
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddOp(unittest.TestCase):
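    """Tests of the Python-level paddle.add API: writing into a given `out`
    variable, the result name, the `alpha` argument (the expected results
    correspond to x + alpha * y), and dygraph mode."""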
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float32")
            y = fluid.data(name='y', shape=[3], dtype='float32')

            res = fluid.data(name="output", shape=[3], dtype="float32")
            y_1 = paddle.add(x, y, out=res)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            data1 = np.array([2, 3, 4], dtype='float32')
            data2 = np.array([1, 5, 2], dtype='float32')
            np_res, np_y_1 = exe.run(feed={'x': data1,
                                           'y': data2},
                                     fetch_list=[res, y_1])

            self.assertEqual((np_res == np_y_1).all(), True)

    def test_out_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float32")
            y = fluid.data(name='y', shape=[3], dtype='float32')

            res = fluid.data(name="output", shape=[3], dtype="float32")
            y_1 = paddle.add(x, y, out=res)

            place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            data1 = np.array([2, 3, 4], dtype='float32')
            data2 = np.array([1, 5, 2], dtype='float32')
            np_res, np_y_1 = exe.run(feed={'x': data1,
                                           'y': data2},
                                     fetch_list=[res, y_1])

            self.assertEqual((np_res == np_y_1).all(), True)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.add(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_alpha(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = paddle.add(x, y, alpha=10)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([12., 53., 24.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_alpha_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = paddle.add(x, y, alpha=-0.5)

            place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([1.5, 0.5, 3.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = paddle.add(x, y, alpha=-0.5)
            np_z = z.numpy()
            z_expected = np.array([1.5, 0.5, 3.])
            self.assertEqual((np_z == z_expected).all(), True)


if __name__ == '__main__':
    unittest.main()