#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle import fluid
from paddle.fluid import core


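# Wrap paddle.divide so the Python-side reference reshapes y before dividing,
# mirroring how the legacy elementwise_div op broadcasts y along `axis`.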
def broadcast_wrapper(shape=[1, 10, 12, 1]):
    def div_wrapper(x, y, axis=-1):
        return paddle.divide(x, y.reshape(shape))

    return div_wrapper


class ElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.prim_op_type = "prim"
        self.init_args()
        self.init_dtype()
        self.init_shape()
        self.if_check_prim()
        self.if_enable_cinn()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def if_enable_cinn(self):
        pass

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def if_check_prim(self):
        self.check_prim = True

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
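        # d(x / y) / dx = 1 / y, so the gradient w.r.t. x is grad_out / y.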
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
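        # d(x / y) / dy = -x / y**2 = -out / y, so the gradient w.r.t. y
        # is -grad_out * out / y.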
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
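        # Check the analytic gradients in three configurations: both X and Y,
        # Y only (X in no_grad_set), and X only (Y in no_grad_set).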
        check_list = []
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
                'check_prim': self.check_prim,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float32
        self.val_dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = []

    def compute_output(self, x, y):
        return x / y.reshape([1, 1])

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape([1, 1])

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape([1, 1]))

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = [13, 17]

    def compute_output(self, x, y):
        return x.reshape([1, 1]) / y

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y)

    def compute_gradient_y(self, grad_out, out, y):
        return -1 * grad_out * out / y

    def if_enable_cinn(self):
        self.enable_cinn = False


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # The bfloat16 Paddle op produces an inconsistent output data type,
        # so the dygraph check is disabled.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]

    # elementwise_pow doesn't support bfloat16, so the prim check is disabled.
    def if_check_prim(self):
        self.check_prim = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
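        # y has a single element, so its gradient reduces over every
        # broadcast position of the output.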
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


class TestElementwiseDivOpNoPrim(ElementwiseDivOp):
    def test_check_gradient(self):
        check_list = []
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivOpBroadcast0(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}
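        # With axis=0, y of shape [100] is broadcast as if reshaped to
        # [100, 1, 1].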
        self.python_api = broadcast_wrapper(shape=[100, 1, 1])

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 100, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]
        self.python_api = broadcast_wrapper(shape=[1, 1, 100])

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(
            30, 1, 4, 1
        )


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
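        # With integer inputs the reference output uses floor division; the
        # operands here are positive, so floor and truncating division agree.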
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
            one = 2.0
            out = one / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
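        # For complex division the analytic gradients use conjugates:
        # dL/dX = grad_out / conj(y) and dL/dY = -grad_out * conj(x / y**2).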
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


class TestElementwiseDivop(unittest.TestCase):
    def test_dygraph_div(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)
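        # Nudge near-zero entries away from zero so the division stays
        # numerically well behaved.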
        np_a[np.abs(np_a) < 0.0005] = 0.002
        np_b[np.abs(np_b) < 0.0005] = 0.002

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray / tensor
        expect_out = np_a / np_b
        actual_out = np_a / tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor / nparray
        actual_out = tensor_a / np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()