#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle import fluid
from paddle.fluid import core


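# paddle.divide broadcasts by aligning trailing dimensions and takes no
# `axis` argument, so the wrapper below reshapes y first to emulate the
# op's legacy `axis`-based broadcasting; the ignored `axis` parameter is
# presumably there to absorb the attribute that OpTest forwards.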
def broadcast_wrapper(shape=[1, 10, 12, 1]):
    def div_wrapper(x, y, axis=-1):
        return paddle.divide(x, y.reshape(shape))

    return div_wrapper


class ElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_args()
        self.init_dtype()
        self.init_shape()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
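        # dOut/dX = 1 / y, hence grad_x = grad_out / y.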
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
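        # dOut/dY = -x / y**2 = -out / y, hence grad_y = -grad_out * out / y.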
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
        check_list = []
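        # Three configurations: gradients for both X and Y, for Y only
        # (X in no_grad_set), and for X only (Y in no_grad_set).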
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


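# Zero-dimensional (0-d) tensor cases: 0-d / 0-d, tensor / 0-d and
# 0-d / tensor. The gradient of a 0-d operand reduces to a scalar sum.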
class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = []


class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = []

    def compute_output(self, x, y):
        return x / y.reshape([1, 1])

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape([1, 1])

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape([1, 1]))


class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = [13, 17]

    def compute_output(self, x, y):
        return x.reshape([1, 1]) / y

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y)

    def compute_gradient_y(self, grad_out, out, y):
        return -1 * grad_out * out / y


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # Due to the output data type inconsistency of the bfloat16 paddle
        # op, the dygraph check is disabled.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
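        # bfloat16 data travels as np.uint16; reference values are computed
        # in float32 and converted in setUp.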
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


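# In the broadcast cases below, the reference gradient of the broadcast
# operand is summed over the broadcast axes so its shape matches the input.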
class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}
        self.python_api = broadcast_wrapper(shape=[100, 1, 1])

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 100, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]
        self.python_api = broadcast_wrapper(shape=[1, 1, 100])

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(
            30, 1, 4, 1
        )


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))


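# Integer case: the reference output uses floor division; operands are drawn
# from [1, 5), where floor and truncating division coincide.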
class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
            one = 2.0
            out = one / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
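        # Complex division uses conjugate (Wirtinger-style) gradients:
        #   grad_x = grad_out / conj(y), grad_y = -grad_out * conj(x / y**2).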
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
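        # X is real here, so only the real part of its complex gradient is kept.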
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


class TestElementwiseDivop(unittest.TestCase):
    def test_dygraph_div(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)
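        # Clamp near-zero entries so the divisions below stay numerically stable.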
        np_a[np.abs(np_a) < 0.0005] = 0.002
        np_b[np.abs(np_b) < 0.0005] = 0.002

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray / tensor
        expect_out = np_a / np_b
        actual_out = np_a / tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor / nparray
        actual_out = tensor_a / np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()