#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard


class ElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_args()
        self.init_dtype()
        self.init_shape()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for the bfloat16 Paddle op
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
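        # For out = x / y, d(out)/d(x) = 1 / y, so grad_x = grad_out / y.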
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
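        # For out = x / y, d(out)/d(y) = -x / y**2 = -out / y,
        # so grad_y = -grad_out * out / y.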
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
        check_list = []
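        # Three gradient-check configurations: gradients w.r.t. both X and Y,
        # w.r.t. Y only (X in no_grad_set), and w.r.t. X only.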
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = []


class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = []

    def compute_output(self, x, y):
        return x / y.reshape([1, 1])

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape([1, 1])

    def compute_gradient_y(self, grad_out, out, y):
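        # y is a 0-D tensor broadcast over every element of x, so its
        # gradient collapses to a scalar sum.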
        return np.sum(-1 * grad_out * out / y.reshape([1, 1]))


class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = [13, 17]

    def compute_output(self, x, y):
        return x.reshape([1, 1]) / y

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y)

    def compute_gradient_y(self, grad_out, out, y):
        return -1 * grad_out * out / y


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # The dygraph check is disabled because of the output data type
        # inconsistency of the bfloat16 Paddle op.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
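        # bfloat16 inputs are fed to the op as np.uint16 bit patterns
        # (via convert_float_to_uint16); the numpy reference values are
        # computed in float32.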
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
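        # y has a single element, so its gradient is the sum over all
        # output elements, kept as a shape-[1] array.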
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
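        # y (shape [100]) is broadcast along axes 1 and 2 of x, so its
        # gradient is summed back over those axes.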
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(
            30, 1, 4, 1
        )


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
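        # x is broadcast over the two leading axes of y, so its gradient
        # is summed back over axes 0 and 1.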
        return np.sum(grad_out / y, axis=(0, 1))


class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
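        # Inputs are positive integers (see gen_data), so numpy floor
        # division matches the integer division expected from the op.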
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
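            # The None dimensions are resolved at run time from the shape of
            # the array fed for 'x'.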
            scalar = 2.0
            out = scalar / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
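        # Reference gradients for complex out = x / y, written in the
        # conjugate (Wirtinger-style) form:
        #   grad_x = grad_out / conj(y)
        #   grad_y = -grad_out * conj(x / y**2) = -grad_out * conj(out / y)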
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


class TestElementwiseDivop(unittest.TestCase):
    def func_dygraph_div(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)
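        # Replace entries near zero so the divisions below stay
        # well-conditioned for the assert_allclose checks.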
        np_a[np.abs(np_a) < 0.0005] = 0.002
        np_b[np.abs(np_b) < 0.0005] = 0.002

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray / tensor
        expect_out = np_a / np_b
        actual_out = np_a / tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor / nparray
        actual_out = tensor_a / np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()

    def test_dygraph_div(self):
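        # Run the mixed numpy/Tensor checks under the eager-mode guard.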
        with _test_eager_guard():
            self.func_dygraph_div()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()