#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core


class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()
        self.only_prim()
        self.if_check_prim()
        self.if_skip_cinn()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

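    # The paddle 2.0 `add` API exposes neither `axis` nor the mkldnn path in
    # dygraph mode (see the TODOs below), so dygraph checking is limited to
    # the default configuration.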
    def check_dygraph(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=self.check_dygraph(), check_prim=self.check_prim
        )

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
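        # The default -1 aligns y with the trailing dimensions of x when
        # broadcasting.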
        self.axis = -1

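    # Hooks overridden by individual cases below: prim (composite op) checks
    # are only enabled for the default axis, and some cases opt out of CINN.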
    def only_prim(self):
        pass

    def if_check_prim(self):
        self.check_prim = self.axis == -1

    def if_skip_cinn(self):
        pass


class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def if_skip_cinn(self):
        self.enable_cinn = False


class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
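                # fp16 has limited precision, so a looser tolerance is used.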
                self.check_output_with_place(
                    place,
                    atol=1e-3,
                    check_dygraph=self.check_dygraph(),
                    check_prim=self.check_prim,
                )

    def test_check_grad_normal(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['X', 'Y'],
                'Out',
                check_dygraph=self.check_dygraph(),
                check_prim=self.check_prim,
            )

    def test_check_grad_ingore_x(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['Y'],
                'Out',
                no_grad_set=set("X"),
                check_dygraph=self.check_dygraph(),
                check_prim=self.check_prim,
            )

    def test_check_grad_ingore_y(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place,
                ['X'],
                'Out',
                no_grad_set=set('Y'),
                check_dygraph=self.check_dygraph(),
                check_prim=self.check_prim,
            )


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "only support compiled with CUDA and cudnn version need larger than 8.1.0 and device's compute capability is at least 8.0",
)
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.prim_op_type = "prim"
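        # numpy has no native bfloat16 dtype; bf16 tensors are represented as
        # uint16 bit patterns, so the float32 inputs below are converted with
        # convert_float_to_uint16.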
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.if_skip_cinn()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_prim=True)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_prim=True
        )

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
        )

    def if_skip_cinn(self):
        self.enable_cinn = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y

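    # Note: after setUp calls this hook once, `self.only_prim` is rebound from
    # the method to a plain flag.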
    def only_prim(self):
        self.only_prim = True


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    def if_check_prim(self):
        self.check_prim = False


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16"
)
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    # The paddle 2.0 `add` API has no `axis` parameter, so prim checking
    # is skipped whenever `axis` is not the default -1.
    def if_check_prim(self):
        self.check_prim = self.axis == -1

    # The numerical gradient method is not accurate enough on fp16,
    # so the grad checks are skipped for this case.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1

    def if_check_prim(self):
        self.check_prim = False


class TestFP16ElementwiseAddOp_broadcast_1(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add


class TestFP16ElementwiseAddOp_broadcast_2(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16."
)
class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1

    # The numerical gradient method is not accurate enough on fp16,
    # so the grad checks are skipped for this case.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)


class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


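# The API tests below exercise paddle.add through the static graph and dygraph
# paths; TestAddInplaceApi reuses them with the in-place add_ variant.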
class TestAddApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
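        # For Out = X + Y the gradient passes through unchanged, so both
        # expected input gradients equal the complex output gradient.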
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
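        # X is real here, so its expected gradient is the real part of the
        # complex output gradient.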
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def test_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


class TestElementwiseAddop1(unittest.TestCase):
    def test_dygraph_add(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray + tensor
        expect_out = np_a + np_b
        actual_out = np_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + nparray
        actual_out = tensor_a + np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


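# Adding a numpy scalar to a paddle tensor should keep the tensor's dtype.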
class TestTensorAddNumpyScalar(unittest.TestCase):
    def test_float32_add(self):
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float32')
        b = np.array([1.5], dtype='float32')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

    def test_float16_add(self):
        if not core.is_compiled_with_cuda():
            return
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float16')
        b = np.array([1.5], dtype='float16')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP16)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()