#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
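# These tests follow the OpTest pattern: TestElementwiseAddOp wires up the op
# inputs/attrs/outputs in setUp(), and subclasses override the init_* hooks
# (init_dtype, init_input_output, init_axis, init_kernel_type) to cover
# FP16/BF16/complex dtypes and the various broadcast configurations, e.g.
#
#     self.x = np.random.rand(100, 2, 3)   # broadcast y along axis=0
#     self.y = np.random.rand(100)
#     self.out = self.x + self.y.reshape(100, 1, 1)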


class TestElementwiseAddOp(OpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_add"
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def check_eager(self):
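        # Eager-mode checking only applies when MKL-DNN is disabled and the
        # default broadcast axis (-1) is used.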
        return not self.use_mkldnn and self.axis == -1

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        self.check_output(
            check_dygraph=not self.use_mkldnn,
            check_eager=self.check_eager())

    def test_check_grad_normal(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X', 'Y'],
            'Out',
            check_dygraph=not self.use_mkldnn,
            check_eager=self.check_eager())

    def test_check_grad_ingore_x(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            check_dygraph=not self.use_mkldnn,
            check_eager=self.check_eager())

    def test_check_grad_ingore_y(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            check_dygraph=not self.use_mkldnn,
            check_eager=self.check_eager())

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
        self.out = np.add(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64

    def init_axis(self):
        self.axis = -1


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        # TODO(wangzhongpu): support mkldnn op in dygraph mode
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(
                    place, atol=1e-3, check_dygraph=not self.use_mkldnn)


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
    "core is not compiled with CUDA and cudnn version need larger than 8.1.0")
class TestBF16ElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.uint16
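        # bfloat16 has no native numpy dtype, so OpTest carries BF16 data as
        # uint16 and converts it with convert_float_to_uint16 below.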

        self.x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        self.out = np.add(self.x, self.y)

        self.axis = -1

        self.inputs = {
            'X':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.x)),
            'Y':
            OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(self.y))
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': False}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=False)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X', 'Y'], 'Out', check_eager=False)

    def test_check_grad_ingore_x(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out', no_grad_set=set("X"), check_eager=False)

    def test_check_grad_ingore_y(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set('Y'), check_eager=False)


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random((100, )).astype(self.dtype)
        self.y = np.random.random((100, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)


class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    def init_input_output(self):
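        # axis=0 aligns y with dimension 0 of x, so the op computes
        # x + y.reshape(100, 1, 1).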
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 100, 3).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 100, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(100).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 100)


class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 1, 2).astype(self.dtype)
        self.y = np.random.rand(100, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(100, 1, 1, 1)

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_5(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 12).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
        self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
        self.out = self.x + self.y


class TestFP16ElementwiseAddOp_broadcast_6(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
        self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
        self.out = self.x + self.y


class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 10, 12).astype(self.dtype)
        self.y = np.random.rand(10, 12).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 10, 12)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1


class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
    def init_input_output(self):
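        # With axis=-1, shapes are right-aligned as in numpy, so y of shape
        # (100, 1, 1) broadcasts across x of shape (100, 2, 3).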
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(100, 2, 3).astype(self.dtype)
        self.y = np.random.rand(100, 1, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseFP16AddOp_commonuse_add1(TestFP16ElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(2, 3, 100).astype(self.dtype)
        self.y = np.random.rand(1, 1, 100).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
        self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = -1


class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 12).astype(self.dtype)
        self.y = np.random.rand(2, 2, 10, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 2


class TestElementwiseAddOp_same_shape_ysize_large(TestElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.rand(10, 1, 12).astype(self.dtype)
        self.y = np.random.rand(10, 2, 12).astype(self.dtype)
        self.out = self.x + self.y

    def init_axis(self):
        self.axis = 0


class TestElementwiseAddOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # the input of elementwise_add must be a Variable.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            y1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

            # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
            # float16 can only be used on GPU places
            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
            y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
            self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)


class TestAddApi(unittest.TestCase):
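    # _executed_api is the extension point: TestAddInplaceApi overrides it to
    # run the same checks against the in-place x.add_(y) variant.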
    def _executed_api(self, x, y, name=None):
        return paddle.add(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='add_res')
            self.assertEqual(('add_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32')
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
            z_expected = np.array([3., 8., 6.])
            self.assertEqual((np_z == z_expected).all(), True)


class TestAddInplaceApi(TestAddApi):
    def _executed_api(self, x, y, name=None):
        return x.add_(y, name)


class TestAddInplaceBroadcastSuccess(unittest.TestCase):
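    # In-place add_ may broadcast y into x as long as the result keeps x's
    # shape.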
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.add_(y)
        numpy_result = self.x_numpy + self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestAddInplaceBroadcastSuccess2(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestAddInplaceBroadcastSuccess3(TestAddInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


class TestAddInplaceBroadcastError(unittest.TestCase):
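    # Broadcasting that would enlarge x cannot be done in place, so add_ is
    # expected to raise ValueError for these shapes.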
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.add_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestAddInplaceBroadcastError2(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestAddInplaceBroadcastError3(TestAddInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestComplexElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
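        # For Out = X + Y the Jacobian w.r.t. each input is the identity, so
        # the expected gradients equal grad_out.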
        self.grad_out = np.ones(self.shape, self.dtype) + 1J * np.ones(
            self.shape, self.dtype)
        self.grad_x = self.grad_out
        self.grad_y = self.grad_out

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out])


class TestRealComplexElementwiseAddOp(TestComplexElementwiseAddOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x + self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1J * np.ones(
            self.shape, self.dtype)
        self.grad_x = np.real(self.grad_out)
        self.grad_y = self.grad_out


class TestBoolAddFloatElementwiseAddop(unittest.TestCase):
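    # Adding a Python float scalar to a bool tensor should promote the result
    # to float32.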
    def test_static_add(self):
        paddle.enable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)

    def test_dygraph_add(self):
        paddle.disable_static()
        a = 1.5
        b = paddle.full([4, 5, 6], True, dtype='bool')
        c = a + b
        self.assertTrue(c.dtype == core.VarDesc.VarType.FP32)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()