#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core


class ElementwiseDivOp(OpTest):
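    """Base case: check elementwise_div outputs and gradients against
    NumPy reference implementations."""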
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_args()
        self.init_dtype()
        self.init_shape()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
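        # dOut/dX = 1 / Y, so grad_x is the upstream gradient divided by y.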
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
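        # dOut/dY = -X / Y**2 = -Out / Y, hence -grad_out * out / y.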
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
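        # Check gradients w.r.t. both inputs, then w.r.t. Y only and X only,
        # each against the NumPy reference gradients computed in setUp.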
        check_list = []
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # The dygraph check is disabled due to the output data type
        # inconsistency of the bfloat16 Paddle OP.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
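        # y is broadcast over all of x, so its gradient reduces to one value.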
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
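        # Sum over the broadcast axes (1, 2) to recover y's shape of [100].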
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(
            30, 1, 4, 1
        )


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))


class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
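        # For these positive integer inputs the reference is floor division.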
        return x // y


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestElementwiseDivOpFp16(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
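        # Divide a scalar by a tensor whose batch and spatial dims are
        # unknown (None) when the static graph is built.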
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
            one = 2.0
            out = one / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
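        # Complex gradients use conjugates:
        # grad_x = grad_out / conj(y), grad_y = -grad_out * conj(x / y**2).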
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ignore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()