#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle
from paddle import fluid
from paddle.fluid import core


class ElementwiseDivOp(OpTest):
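    # NumPy reference implementation shared by the elementwise_div test cases
    # below; subclasses override the init_* and compute_* hooks.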

    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_args()
        self.init_dtype()
        self.init_shape()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out,
                                         y).astype(self.val_dtype)

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def gen_data(self, shape):
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
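        # dOut/dX of out = x / y is 1 / y.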
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
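        # dOut/dY of out = x / y is -x / y**2, i.e. -out / y.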
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
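        # Check dX and dY together, then each one alone with the other placed
        # in no_grad_set, against the NumPy reference gradients.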
        check_list = []
        check_list.append({
            'grad': ['X', 'Y'],
            'no_grad': None,
            'val_grad': [self.grad_x, self.grad_y]
        })
        check_list.append({
            'grad': ['Y'],
            'no_grad': set('X'),
            'val_grad': [self.grad_y]
        })
        check_list.append({
            'grad': ['X'],
            'no_grad': set('Y'),
            'val_grad': [self.grad_x]
        })
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


@unittest.skipIf(not core.is_compiled_with_cuda()
                 or not core.is_bfloat16_supported(core.CUDAPlace(0)),
                 "core is not compiled with CUDA or not support the bfloat16")
class TestElementwiseDivOpBF16(ElementwiseDivOp):

    def init_args(self):
        # The bfloat16 Paddle op's output data type is inconsistent, so the dygraph check is disabled.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseDivOpScalar(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
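        # Y has shape [1], so its gradient reduces over every element.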
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


class TestElementwiseDivOpBroadcast0(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
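        # Reduce over the axes along which Y was broadcast (here axes 1 and 2).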
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 10, 12, 1),
                      axis=(0, 3))


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y,
                      axis=(1, 3)).reshape(30, 1, 4, 1)


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):

    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))


class TestElementwiseDivOpInt(ElementwiseDivOp):

    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
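        # For integer dtypes the reference result is floor division.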
        return x // y


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestElementwiseDivOpFp16(ElementwiseDivOp):

    def init_dtype(self):
        self.dtype = np.float16
        self.val_dtype = np.float16


class TestElementwiseDivBroadcast(unittest.TestCase):

    def test_shape_with_batch_sizes(self):
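        # Divide a Python scalar by a tensor whose batch and spatial
        # dimensions are left dynamic (None) in the static graph.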
        with fluid.program_guard(fluid.Program()):
            x_var = fluid.data(name='x',
                               dtype='float32',
                               shape=[None, 3, None, None])
            one = 2.
            out = one / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            out_result, = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2., 0.6, 2.])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):

    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(
            (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random(
                (2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random(
            (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random(
                (2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
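        # Gradients of complex division follow Wirtinger calculus:
        # dL/dX = dL/dOut / conj(Y) and dL/dY = -dL/dOut * conj(X / Y**2).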
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones(
            (2, 3, 4, 5), self.dtype)
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output(check_eager=False)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'],
                        'Out',
                        user_defined_grads=[self.grad_x, self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_x(self):
        self.check_grad(['Y'],
                        'Out',
                        no_grad_set=set("X"),
                        user_defined_grads=[self.grad_y],
                        user_defined_grad_outputs=[self.grad_out])

    def test_check_grad_ingore_y(self):
        self.check_grad(['X'],
                        'Out',
                        no_grad_set=set('Y'),
                        user_defined_grads=[self.grad_x],
                        user_defined_grad_outputs=[self.grad_out])


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random(
            (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random(
                (2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones(
            (2, 3, 4, 5), self.dtype)
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()