#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper


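# The cases below follow the OpTest hook pattern: setUp() builds the op's
# inputs/attrs/outputs once, and each subclass customizes a scenario by
# overriding init_dtype(), init_input_output(), init_axis(), if_check_prim()
# and if_enable_cinn() instead of rewriting setUp().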
class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.public_python_api = paddle.add
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()
        self.if_check_prim()
        self.if_enable_cinn()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_dygraph(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=self.check_dygraph(), check_prim=self.check_prim
        )

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1

    def if_check_prim(self):
        self.check_prim = self.axis == -1

    def if_enable_cinn(self):
        pass


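# The ZeroDim cases exercise 0-D (scalar) tensors on one or both inputs; the
# reference output is just numpy broadcasting of the sampled values.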
class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        place = core.CUDAPlace(0)
        self.check_output_with_place(
            place,
            atol=1e-3,
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_prim=True)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_prim=True
        )

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
        )


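# bfloat16 inputs are carried as np.uint16: the float32 reference data is
# packed with convert_float_to_uint16 before being fed to the op, and the
# expected output is packed the same way; CINN is switched off for this case.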
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires CUDA, cuDNN version 8.1.0 or newer, and a device with compute capability of at least 8.0",
)
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.public_python_api = paddle.add
        self.prim_op_type = "prim"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.if_enable_cinn()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_prim=True)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_prim=True
        )

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
        )

    def if_enable_cinn(self):
        self.enable_cinn = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


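# The broadcast_* cases pair a lower-rank Y with the legacy 'axis' attribute,
# which says where Y's dimensions line up with X.  With X of shape (100, 2, 3),
# Y of shape (100,) and axis=0, the reference below is computed as
# X + Y.reshape(100, 1, 1), matching plain numpy broadcasting of the reshaped Y.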
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    def if_check_prim(self):
        self.check_prim = False


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16"
)
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    # The paddle 2.0 paddle.add API has no axis parameter,
    # so the prim check can only run when axis is the default -1.
    def if_check_prim(self):
        self.check_prim = self.axis == -1

    # The numerical gradient is not accurate enough in fp16,
    # so gradient checks are skipped for fp16.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1

    def if_check_prim(self):
        self.check_prim = False


class TestFP16ElementwiseAddOp_broadcast_1(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add


class TestFP16ElementwiseAddOp_broadcast_2(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16."
)
class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1

    # The numerical gradient is not accurate enough in fp16,
    # so gradient checks are skipped for fp16.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 100, 1).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


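# A minimal illustrative case (added for exposition; not part of the original
# battery): with the default hooks above, a new shape configuration only
# needs to override init_input_output().
class TestElementwiseAddOpExampleShape(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [4, 5, 6]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [4, 5, 6]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

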
class TestAddApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = paddle.static.data(name="x", shape=[3], dtype='float32')
            y = paddle.static.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


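# x.add_(y) is the in-place variant: y may broadcast up to x's shape, but x's
# own shape must not change, which is why the *BroadcastError cases further
# down expect a ValueError when y would force x to grow.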
class TestAddInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


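# For out = x + y the analytic gradient w.r.t. both x and y equals the
# upstream gradient, so the user_defined_grads below are just grad_out
# (a tensor of (1 + 1j) values), or its real part when the input is real.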
class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


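# Adding a Python float scalar to a bool tensor promotes the result to
# float32; the assertions below check that promoted dtype in both static and
# dygraph modes.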
class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def test_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


class TestElementwiseAddop1(unittest.TestCase):
    def test_dygraph_add(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: numpy array + tensor
        expect_out = np_a + np_b
        actual_out = np_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + nparray
        actual_out = tensor_a + np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


class TestTensorAddNumpyScalar(unittest.TestCase):
    def test_float32_add(self):
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float32')
        b = np.array([1.5], dtype='float32')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

    def test_float16_add(self):
        if not core.is_compiled_with_cuda():
            return
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float16')
        b = np.array([1.5], dtype='float16')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP16)


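# With FLAGS_print_extra_attrs set to "1", appending an op whose extra
# attribute differs from its default (axis=1 instead of -1 here) emits a
# warning, which this test captures and asserts on.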
class TestTensorAddAPIWarnings(unittest.TestCase):
    def test_warnings(self):

        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")

            paddle.enable_static()
            helper = LayerHelper("elementwise_add")
            data = paddle.static.data(
                name='data', shape=[None, 3, 32, 32], dtype='float32'
            )
            out = helper.create_variable_for_type_inference(dtype=data.dtype)
            os.environ['FLAGS_print_extra_attrs'] = "1"
            helper.append_op(
                type="elementwise_add",
                inputs={'X': data, 'Y': data},
                outputs={'Out': out},
                attrs={'axis': 1, 'use_mkldnn': False},
            )
            self.assertTrue(
                "op elementwise_add's attr axis = 1 is not the default value: -1"
                in str(context[-1].message)
            )
            os.environ['FLAGS_print_extra_attrs'] = "0"


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()