#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.layer_helper import LayerHelper
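
# This file tests paddle's elementwise_add operator: forward and gradient
# checks for float/fp16/bf16/complex dtypes, 0-D tensors, broadcasting driven
# by the legacy `axis` attribute, the in-place `add_` API, and type-promotion
# corner cases.
#
# A minimal usage sketch of the API under test (assuming an installed Paddle
# with dygraph mode enabled):
#
#   x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
#   y = paddle.to_tensor([10.0, 20.0])
#   z = paddle.add(x, y)  # broadcasts y over rows -> [[11., 22.], [13., 24.]]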


class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()
        self.if_check_prim()
        self.if_enable_cinn()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_dygraph(self):
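        # paddle.add in the 2.x API has no axis argument and mkldnn kernels
        # are not exercised in dygraph mode, so the dygraph check is limited
        # to the default axis == -1 without mkldnn.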
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=self.check_dygraph(), check_prim=self.check_prim
        )

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=self.check_dygraph(),
            check_prim=self.check_prim,
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1

    def if_check_prim(self):
        self.check_prim = self.axis == -1

    def if_enable_cinn(self):
        pass


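# 0-D (scalar) tensor cases; the CINN check is switched off for these via the
# if_enable_cinn() hook.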
class TestElementwiseAddOp_ZeroDim1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestElementwiseAddOp_ZeroDim2(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_ZeroDim3(TestElementwiseAddOp_ZeroDim1):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, []).astype(self.dtype)
        self.out = np.add(self.x, self.y)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place,
                    atol=1e-3,
                    check_dygraph=self.check_dygraph(),
                    check_prim=self.check_prim,
                )


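# bfloat16 inputs/outputs are passed to the op as uint16 bit patterns via
# convert_float_to_uint16; the test is skipped unless CUDA is available with
# cuDNN >= 8.1 and a device of compute capability >= 8.0.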
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires CUDA, cuDNN version of at least 8.1.0, and a device compute capability of at least 8.0",
)
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.prim_op_type = "prim"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.if_enable_cinn()

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_prim=True)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_prim=True
        )

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_prim=True
        )

    def if_enable_cinn(self):
        self.enable_cinn = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


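# Broadcast cases driven by the legacy `axis` attribute: Y is aligned with X
# starting at dimension `axis`. For example, X of shape (100, 2, 3) and Y of
# shape (100,) with axis=0 add as if Y were reshaped to (100, 1, 1).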
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    def if_check_prim(self):
        self.check_prim = False


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16"
)
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0

    # The paddle 2.0 `add` API has no axis parameter, so prim checks are
    # skipped whenever axis is not the default -1.
    def if_check_prim(self):
        self.check_prim = self.axis == -1

    # The numerical gradient method is not accurate enough for fp16, so the
    # gradient checks are skipped for this dtype.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1

    def if_check_prim(self):
        self.check_prim = False


class TestFP16ElementwiseAddOp_broadcast_1(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add


class TestFP16ElementwiseAddOp_broadcast_2(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)
        self.python_api = paddle.add

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(
    TestFP16ElementwiseAddOp_broadcast_0
):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="the numerical method is not accurate enough on fp16."
)
class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1

    # The numerical gradient method is not accurate enough for fp16, so the
    # gradient checks are skipped for this dtype.
    def test_check_grad_normal(self):
        pass

    def test_check_grad_ingore_x(self):
        pass

    def test_check_grad_ingore_y(self):
        pass


class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 100, 1).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


class TestAddApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((np_z == z_expected).all(), True)


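# In-place variants: x.add_(y) writes the result into x, so broadcasting is
# only valid when the broadcasted shape equals x's shape; otherwise a
# ValueError is expected (see the *BroadcastError cases below).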
class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


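# Complex cases supply analytic gradients explicitly: for out = x + y the
# gradient w.r.t. each input is the upstream grad itself (its real part when
# that input is real-valued).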
class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


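# Type-promotion corner case: adding a Python float scalar to a bool tensor
# promotes the result to float32.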
class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def test_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tenor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


class TestElementwiseAddop1(unittest.TestCase):
    def test_dygraph_add(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray + tenor
        expect_out = np_a + np_b
        actual_out = np_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + nparray
        actual_out = tensor_a + np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


class TestTensorAddNumpyScalar(unittest.TestCase):
    def test_float32_add(self):
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float32')
        b = np.array([1.5], dtype='float32')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

    def test_float16_add(self):
        if not core.is_compiled_with_cuda():
            return
        paddle.disable_static()
        a = paddle.full([4, 5, 6], 1.5, dtype='float16')
        b = np.array([1.5], dtype='float16')[0]
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP16)


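
# With FLAGS_print_extra_attrs=1, appending an op whose extra attribute differs
# from its default (here axis=1 instead of -1) should emit a warning, which is
# captured and asserted on below.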
class TestTensorAddAPIWarnings(unittest.TestCase):
    def test_warnings(self):

        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")

            paddle.enable_static()
            helper = LayerHelper("elementwise_add")
            data = paddle.static.data(
                name='data', shape=[None, 3, 32, 32], dtype='float32'
            )
            out = helper.create_variable_for_type_inference(dtype=data.dtype)
            os.environ['FLAGS_print_extra_attrs'] = "1"
            helper.append_op(
                type="elementwise_add",
                inputs={'X': data, 'Y': data},
                outputs={'Out': out},
                attrs={'axis': 1, 'use_mkldnn': False},
            )
            self.assertTrue(
                "op elementwise_add's attr axis = 1 is not the default value: -1"
                in str(context[-1].message)
            )
            os.environ['FLAGS_print_extra_attrs'] = "0"


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()