#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
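# Every test below follows the OpTest pattern: setUp() registers the operator
# name together with numpy inputs, attributes and expected outputs, and the
# inherited check_output()/check_grad() helpers run the compiled kernel and
# compare its results (and gradients) against the numpy reference built here.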


class TestElementwiseAddOp(OpTest):

    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

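        # As far as these tests are concerned, np_dtype_to_fluid_dtype is a
        # thin wrapper that normalizes the numpy array's dtype into the
        # representation the fluid framework expects (e.g. for float16).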
        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

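    # Eager-mode verification is only requested for the plain kernel with the
    # default axis; the MKLDNN path and non-default axes presumably fall
    # outside what the eager checker covers.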
    def check_eager(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=not self.use_mkldnn,
                          check_eager=self.check_eager())

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X', 'Y'],
                        'Out',
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


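# The FP16 variant only verifies outputs, and only on a CUDA device that
# actually supports float16, with a looser tolerance (atol=1e-3) than the
# float64 base test; gradient checks are skipped via the early returns above.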
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=not self.use_mkldnn)


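# bfloat16 has no native numpy dtype, so BF16 tensors are carried around as
# np.uint16 bit patterns; convert_float_to_uint16() converts the float32
# reference data into that representation.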
@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires CUDA support, cuDNN 8.1.0 or newer, and a device with compute capability of at least 8.0"
)
class TestBF16ElementwiseAddOp(OpTest):

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)),
            'Y': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y))
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['Y'],
                                   'Out',
                                   no_grad_set=set("X"),
                                   check_eager=False)

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   no_grad_set=set('Y'),
                                   check_eager=False)


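# skip_check_grad_ci marks cases whose gradient check is deliberately skipped
# in CI; the cases below broadcast a scalar-shaped Y against a 3-D X.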
@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


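# For the broadcast cases below, the 'axis' attribute is the dimension index
# of X at which Y's dimensions start to align; axis=-1 (the default) aligns Y
# with X's trailing dimensions. Each numpy reference reshapes Y accordingly,
# e.g. y with shape (100,) added at axis=0 behaves like y.reshape(100, 1, 1).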
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


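# Static-graph input validation: elementwise_add should raise TypeError both
# for raw LoDTensor inputs (a Variable is required) and for an unsupported
# dtype such as uint8.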
class TestElementwiseAddOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be a Variable
            x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16, float32,
            # float64, int32 or int64; float16 can only be used on GPU places
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


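# Functional API coverage for paddle.add(x, y, name=None) in static graph and
# dygraph modes; TestAddInplaceApi reruns the same checks through the
# in-place variant Tensor.add_(y).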
class TestAddApi(unittest.TestCase):

    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):

    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


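# In-place add_ may broadcast Y up to X's shape but never the reverse: the
# result must keep X's shape, so the Error cases below, where broadcasting
# would enlarge X, are expected to raise ValueError.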
class TestAddInplaceBroadcastSuccess(unittest.TestCase):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):

    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


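# For complex inputs the gradients are checked against user-supplied values:
# since d(out)/dx = d(out)/dy = 1 for out = x + y, both input gradients equal
# grad_out, collapsing to its real part when the corresponding input is real.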
class TestComplexElementwiseAddOp(OpTest):

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'],
                        'Out',
                        user_defined_grads=[self.grad_x, self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_x(self):
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        user_defined_grads=[self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_y(self):
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        user_defined_grads=[self.grad_x],
                        user_defined_grad_outputs=[self.grad_out])


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
681 682
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


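# Type promotion: adding a python float scalar to a bool tensor should
# promote the result to float32 in both static and dygraph modes.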
class TestBoolAddFloatElementwiseAddop(unittest.TestCase):

    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def func_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

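    # Run the dygraph checks twice: once under the eager guard and once in
    # legacy dygraph mode.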
    def test_dygraph_add(self):
        with _test_eager_guard():
            self.func_dygraph_add()
        self.func_dygraph_add()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()