#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestElementwiseAddOp(OpTest):

    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_eager(self):
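        # Eager-mode (final-state dygraph) checking only applies to the
        # plain kernel: skip it when MKL-DNN or a non-default axis is used.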
        return (self.use_mkldnn == False and self.axis == -1)

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=(self.use_mkldnn == False),
                          check_eager=self.check_eager())

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X', 'Y'],
                        'Out',
                        check_dygraph=(self.use_mkldnn == False),
                        check_eager=self.check_eager())

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        check_dygraph=(self.use_mkldnn == False),
                        check_eager=self.check_eager())

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        check_dygraph=(self.use_mkldnn == False),
                        check_eager=self.check_eager())

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1
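

# Illustrative module-level helper (an addition for exposition; not part of
# the original suite and not exercised by it): a minimal numpy sketch of the
# assumed `axis` rule of elementwise_add, where Y's dimensions are aligned
# at position `axis` of X and the remaining dimensions are padded with 1s
# before ordinary numpy-style broadcasting.
def _axis_broadcast_add(x, y, axis=-1):
    if axis == -1:
        axis = x.ndim - y.ndim
    # Place y's dims at position `axis` of x, padding both sides with 1s.
    shape = [1] * axis + list(y.shape) + [1] * (x.ndim - y.ndim - axis)
    return x + y.reshape(shape)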


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
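        # float16 kernels are only registered for CUDA; atol is loosened to
        # 1e-3 since float16 carries only about three decimal digits.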
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=(self.use_mkldnn == False))


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
class TestBF16ElementwiseAddOp(OpTest):

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16
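        # bfloat16 has no native numpy dtype, so OpTest carries it as the
        # raw uint16 bit pattern; inputs are generated in float32 and then
        # converted with convert_float_to_uint16.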

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)),
            'Y': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y))
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['Y'],
                                   'Out',
                                   no_grad_set=set("X"),
                                   check_eager=False)

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   no_grad_set=set('Y'),
                                   check_eager=False)


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
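        # Equivalent, via the illustrative _axis_broadcast_add helper above:
        # _axis_broadcast_add(self.x, self.y, axis=0)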

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be Variable.
            x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16, float32, float64, int32 or int64
            # float16 can only be used on a GPU place
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddApi(unittest.TestCase):

    def _executed_api(self, x, y, name=None):
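        # Hook point: TestAddInplaceApi overrides this to route the same
        # assertions through the in-place x.add_(y) API.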
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):

    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
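    # x.add_(y) writes the result into x, so y may broadcast up to x's
    # shape, but a result larger than x cannot be stored in place.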

    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestComplexElementwiseAddOp(OpTest):

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
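        # For out = x + y the Jacobian w.r.t. both inputs is the identity,
        # so the analytic gradients handed to check_grad are grad_out itself.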
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'],
                        'Out',
                        user_defined_grads=[self.grad_x, self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_x(self):
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        user_defined_grads=[self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_y(self):
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        user_defined_grads=[self.grad_x],
                        user_defined_grad_outputs=[self.grad_out])


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
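        # x is real here, so only the real part of the complex output
        # gradient flows back to it: grad_x = Re(grad_out).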
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
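    # Adding a Python float scalar to a bool tensor promotes the result to
    # float32 in both static and dygraph modes.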

    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def test_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()