#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.tests.unittests.op_test import (
    OpTest,
    convert_float_to_uint16,
    skip_check_grad_ci,
)
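
# The cases below follow the OpTest pattern: TestElementwiseAddOp implements
# the shared setUp() and check_* methods, and each subclass overrides the
# init_dtype / init_input_output / init_axis hooks to vary the dtype, the
# operand shapes, and the broadcast axis.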


class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_eager(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


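# 0-D tensor cases: x, y, or both are zero-dimensional (shape []).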
class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


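# float16 variant: runs only where CUDA reports float16 support, and checks
# the output with a looser atol (1e-3) to absorb half-precision rounding.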
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=(not self.use_mkldnn)
                )


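# bfloat16 is emulated through uint16 storage: inputs are generated in
# float32 and converted with convert_float_to_uint16 before being fed in.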
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires CUDA support, cuDNN version of at least 8.1.0, and a device with compute capability of at least 8.0",
)
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False
        )

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False
        )


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


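# Broadcast cases. With axis=k, Y is aligned against X's dimensions starting
# at dim k, so e.g. X(100, 2, 3) + Y(100) with axis=0 matches the numpy
# reference x + y.reshape(100, 1, 1) used below.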
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


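# Rowwise cases: Y matches a contiguous run of X's dimensions starting at
# the given axis.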
class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


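# Python API tests: paddle.add (and the in-place Tensor.add_ below) in both
# static graph and dygraph modes.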
class TestAddApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((np_z == z_expected).all(), True)


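# In-place add: x.add_(y) may broadcast y, but the result must keep x's
# shape; the *BroadcastError cases expect a ValueError when it would not.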
class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


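# Complex cases: Out = X + Y is linear, so the reference gradients passed via
# user_defined_grads are simply grad_x = grad_y = grad_out.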
class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


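# Type promotion: adding a Python float scalar to a bool tensor is expected
# to produce a float32 result.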
class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def func_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_add(self):
        with _test_eager_guard():
            self.func_dygraph_add()
        self.func_dygraph_add()


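# Interop with numpy: ndarray + Tensor and Tensor + ndarray should both
# evaluate elementwise and match the pure-numpy result.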
class TestElementwiseAddop1(unittest.TestCase):
    def func_dygraph_add(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray + tensor
        expect_out = np_a + np_b
        actual_out = np_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + nparray
        actual_out = tensor_a + np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_add(self):
        with _test_eager_guard():
            self.func_dygraph_add()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()