#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core


class ElementwiseDivOp(OpTest):
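    """Check elementwise_div's forward output and analytic gradients."""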
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_args()
        self.init_dtype()
        self.init_shape()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
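        # dOut/dX = 1 / Y, so grad_x is the upstream gradient divided by Y.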
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
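        # dOut/dY = -X / Y**2 = -Out / Y, applied elementwise.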
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
        check_list = []
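        # Three configurations: gradients w.r.t. both X and Y, only Y
        # (X in no_grad_set), and only X (Y in no_grad_set).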
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = []


class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = []

    def compute_output(self, x, y):
        return x / y.reshape([1, 1])

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape([1, 1])

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape([1, 1]))


class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = [13, 17]

    def compute_output(self, x, y):
        return x.reshape([1, 1]) / y

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y)

    def compute_gradient_y(self, grad_out, out, y):
        return -1 * grad_out * out / y


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # Disable the dygraph check: the bfloat16 Paddle op's output data
        # type is inconsistent with the reference.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
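        # bfloat16 data is stored as np.uint16; reference values are computed
        # in float32 and converted in setUp via convert_float_to_uint16.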
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


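# Broadcast cases: the gradient of the broadcast operand is reduced by
# summing over the broadcast axes and reshaping back to its original shape.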
class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(30, 1, 4, 1)


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))


class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
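        # The reference output for integer inputs uses floor division.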
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
            scalar = 2.0
            out = scalar / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
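        # Complex (Wirtinger) gradients: dL/dX = dL/dOut / conj(Y) and
        # dL/dY = -dL/dOut * conj(X / Y**2) = -dL/dOut * conj(Out / Y).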
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()