#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
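
"""Unit tests for the elementwise_add operator.

Covers the float64 base case plus the FP16/BF16 kernels, broadcasting along
an explicit axis, scalar and row-wise variants, the in-place ``add_`` API,
complex dtypes, and error handling for invalid inputs.
"""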

import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import (
    OpTest,
    skip_check_grad_ci,
    convert_float_to_uint16,
)
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
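
# A minimal usage sketch of the API under test (dygraph mode; illustrative
# values taken from test_dygraph below):
#
#   x = paddle.to_tensor([2.0, 3.0, 4.0])
#   y = paddle.to_tensor([1.0, 5.0, 2.0])
#   paddle.add(x, y)  # -> [3., 8., 6.]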


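# Base test case: subclasses override the init_* hooks below to vary the
# dtype, the input shapes, and the broadcast axis.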
class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.python_api = paddle.add
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_eager(self):
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_ignore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def test_check_grad_ignore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=(not self.use_mkldnn),
            check_eager=self.check_eager(),
        )

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=(not self.use_mkldnn)
                )


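# BF16 tensors are stored as uint16 in the test harness; the float32
# reference data is converted with convert_float_to_uint16 before being fed
# to the op.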
@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or core.cudnn_version() < 8100
    or paddle.device.cuda.get_device_capability()[0] < 8,
    "requires compilation with CUDA, cuDNN version at least 8.1.0, and a device with compute capability of at least 8.0",
)
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.x)
            ),
            'Y': OpTest.np_dtype_to_fluid_dtype(
                convert_float_to_uint16(self.y)
            ),
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ignore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False
        )

    def test_check_grad_ignore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False
        )


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast."
)
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100,)).astype(self.dtype)
        self.y = np.random.random((100,)).astype(self.dtype)
        self.out = np.add(self.x, self.y)


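# In the broadcast cases below, `axis` names the dimension of X at which Y's
# first dimension is aligned, so x(100, 2, 3) + y(100,) with axis=0 is
# computed as x + y.reshape(100, 1, 1).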
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 1).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


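# Most of the cases below pair same-rank operands and rely on numpy-style
# expansion of size-1 dimensions rather than an explicit broadcast axis.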
class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


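# Invalid inputs: elementwise_add accepts only Variables of supported dtypes,
# so raw LoDTensor inputs and uint8 data must raise TypeError.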
class TestElementwiseAddOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
            )
            y1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
            # float16 only can be set on GPU place
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3.0, 8.0, 6.0])
            self.assertEqual((np_z == z_expected).all(), True)


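# In-place variant: x.add_(y) must produce the same values as paddle.add(x, y)
# and is only legal when the broadcast output shape equals x's own shape.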
class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


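# For addition, d(out)/dx and d(out)/dy are both 1, so the expected analytic
# gradients below are simply grad_out (or its real part for a real input).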
class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
605
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
606 607 608 609 610 611 612 613
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


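# Adding a Python float scalar to a bool tensor promotes the result to FP32;
# the cases below check this in both static and dygraph modes.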
class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)
        paddle.enable_static()

    def func_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([2], True, dtype='bool')
        # special case: scalar + tensor(bool)
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor + tensor
        expect_out = np_a + np_b
        actual_out = tensor_a + tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor + scalar
        expect_out = np_a + 1
        actual_out = tensor_a + 1
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: scalar + tensor
        expect_out = 1 + np_a
        actual_out = 1 + tensor_a
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_add(self):
        with _test_eager_guard():
            self.func_dygraph_add()
        self.func_dygraph_add()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()