#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
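"""Unit tests for the elementwise_add operator.

Covers forward and gradient checks for float64, float16, bfloat16, and
complex inputs, broadcasting via the `axis` attribute, the paddle.add API,
in-place `add_`, and error handling.
"""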

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard


class TestElementwiseAddOp(OpTest):
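    """Base case: adds two float64 tensors of the same shape.

    Subclasses override the init_* hooks to vary dtype, shapes, and axis.
    """
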
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_eager(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(check_dygraph=not self.use_mkldnn,
                          check_eager=self.check_eager())

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X', 'Y'],
                        'Out',
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def test_check_grad_ignore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def test_check_grad_ignore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        check_dygraph=not self.use_mkldnn,
                        check_eager=self.check_eager())

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
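    """Float16 variant; output is checked on CUDA with atol=1e-3 when supported."""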

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=not self.use_mkldnn)


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires a CUDA build with cuDNN version at least 8.1.0 and a device with compute capability at least 8.0"
)
class TestBF16ElementwiseAddOp(OpTest):
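    """Bfloat16 variant: float32 inputs are converted to uint16 storage with
    convert_float_to_uint16 and checked on a CUDA place."""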

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)),
            'Y': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y))
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['Y'],
                                   'Out',
                                   no_grad_set=set("X"),
                                   check_eager=False)

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   no_grad_set=set('Y'),
                                   check_eager=False)


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


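# The broadcast_* cases exercise the `axis` attribute: y is broadcast against
# x starting at dimension `axis`, and axis=-1 aligns trailing dimensions.
# For example, x of shape (100, 2, 3) plus y of shape (100,) with axis=0 is
# equivalent to x + y.reshape(100, 1, 1).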
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
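    """x has fewer dimensions than y, so x (10, 12) is broadcast into
    y (2, 2, 10, 12) starting at dimension axis=2."""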

    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOpError(unittest.TestCase):
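    """elementwise_add raises TypeError for non-Variable inputs and for
    unsupported dtypes such as uint8."""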

    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be Variable.
            x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]),
                                         [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
            # float16 only can be set on GPU place
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddApi(unittest.TestCase):

    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):

    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
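    """In-place add_ succeeds when y broadcasts to x's shape, so the result
    keeps the shape of x."""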

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
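    """In-place add_ raises ValueError when broadcasting would change
    the shape of x."""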

    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):

    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestComplexElementwiseAddOp(OpTest):
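    """Complex add: d(out)/dx and d(out)/dy are both 1, so the user-defined
    gradients for X and Y equal grad_out."""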

    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'],
                        'Out',
                        user_defined_grads=[self.grad_x, self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ignore_x(self):
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        user_defined_grads=[self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ignore_y(self):
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        user_defined_grads=[self.grad_x],
                        user_defined_grad_outputs=[self.grad_out])


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(
            self.shape, self.dtype) + 1J * np.ones(self.shape, self.dtype)
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
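    """Adding a Python float scalar to a bool tensor promotes the result
    to float32."""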

    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

    def func_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_add(self):
        with _test_eager_guard():
            self.func_dygraph_add()
        self.func_dygraph_add()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()