#  Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
from paddle import fluid
from paddle.fluid import core


def broadcast_wrapper(shape=[1, 10, 12, 1]):
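    # Reshape y to `shape` before dividing so the dygraph python_api matches
    # the axis-based broadcasting used by the static elementwise_div op.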
    def div_wrapper(x, y, axis=-1):
        return paddle.divide(x, y.reshape(shape))

    return div_wrapper


class ElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.public_python_api = paddle.divide
        self.prim_op_type = "prim"
        self.init_args()
        self.init_dtype()
        self.init_shape()
        self.if_check_prim()
        self.if_enable_cinn()

        x = self.gen_data(self.x_shape).astype(self.val_dtype)
        y = self.gen_data(self.y_shape).astype(self.val_dtype)
        out = self.compute_output(x, y).astype(self.val_dtype)
        grad_out = np.ones(out.shape).astype(self.val_dtype)
        grad_x = self.compute_gradient_x(grad_out, y).astype(self.val_dtype)
        grad_y = self.compute_gradient_y(grad_out, out, y).astype(
            self.val_dtype
        )

        # Convert np.float32 data to np.uint16 for bfloat16 Paddle OP
        if self.dtype == np.uint16:
            x = convert_float_to_uint16(x)
            y = convert_float_to_uint16(y)
            out = convert_float_to_uint16(out)
            grad_out = convert_float_to_uint16(grad_out)
            grad_x = convert_float_to_uint16(grad_x)
            grad_y = convert_float_to_uint16(grad_y)

        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': out}
        self.grad_out = grad_out
        self.grad_x = grad_x
        self.grad_y = grad_y

    def if_enable_cinn(self):
        self.enable_cinn = True

    def init_args(self):
        self.check_dygraph = True
        self.place = None

    def init_dtype(self):
        self.dtype = np.float64
        self.val_dtype = np.float64

    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = [13, 17]

    def if_check_prim(self):
        self.check_prim = True

    def gen_data(self, shape):
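        # Sample from [0.1, 1) so divisors stay well away from zero.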
        return np.random.uniform(0.1, 1, shape)

    def compute_output(self, x, y):
        return x / y

    def compute_gradient_x(self, grad_out, y):
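        # out = x / y, so d(out)/dx = 1 / y.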
        return grad_out / y

    def compute_gradient_y(self, grad_out, out, y):
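        # d(out)/dy = -x / y**2 = -out / y.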
        return -1 * grad_out * out / y

    def test_check_output(self):
        if self.place is None:
            self.check_output()
        else:
            self.check_output_with_place(self.place)

    def test_check_gradient(self):
        check_list = []
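        # Each entry checks one gradient combination: both X and Y, only Y
        # (X in no_grad_set), and only X (Y in no_grad_set).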
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
                'check_prim': self.check_prim,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.float32
        self.val_dtype = np.float32


class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = []


class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [13, 17]
        self.y_shape = []

    def compute_output(self, x, y):
        return x / y.reshape([1, 1])

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape([1, 1])

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape([1, 1]))


class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = []
        self.y_shape = [13, 17]

    def compute_output(self, x, y):
        return x.reshape([1, 1]) / y

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y)

    def compute_gradient_y(self, grad_out, out, y):
        return -1 * grad_out * out / y


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or not support the bfloat16",
)
class TestElementwiseDivOpBF16(ElementwiseDivOp):
    def init_args(self):
        # Due to the output data type inconsistency of the bfloat16 paddle op, we disable the dygraph check.
        self.check_dygraph = False
        self.place = core.CUDAPlace(0)

    def init_dtype(self):
        self.dtype = np.uint16
        self.val_dtype = np.float32

    def init_shape(self):
        self.x_shape = [12, 13]
        self.y_shape = [12, 13]

    def test_check_gradient(self):
        check_list = []
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)

    # elementwise_pow doesn't support bfloat16
    def if_check_prim(self):
        self.check_prim = False

    def if_enable_cinn(self):
        self.enable_cinn = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseDivOpScalar(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [20, 3, 4]
        self.y_shape = [1]

    def compute_gradient_y(self, grad_out, out, y):
        return np.array([np.sum(-1 * grad_out * out / y)])


class TestElementwiseDivOpVector(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [100]
        self.y_shape = [100]


class TestElementwiseDivOpNoPrim(ElementwiseDivOp):
    def test_check_gradient(self):
        check_list = []
        check_list.append(
            {
                'grad': ['X', 'Y'],
                'no_grad': None,
                'val_grad': [self.grad_x, self.grad_y],
            }
        )
        check_list.append(
            {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
        )
        check_list.append(
            {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
        )
        for check_option in check_list:
            check_args = [check_option['grad'], 'Out']
            check_kwargs = {
                'no_grad_set': check_option['no_grad'],
                'user_defined_grads': check_option['val_grad'],
                'user_defined_grad_outputs': [self.grad_out],
                'check_dygraph': self.check_dygraph,
            }
            if self.place is None:
                self.check_grad(*check_args, **check_kwargs)
            else:
                check_args.insert(0, self.place)
                self.check_grad_with_place(*check_args, **check_kwargs)


class TestElementwiseDivOpBroadcast0(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [100, 3, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 0}
        self.python_api = broadcast_wrapper(shape=[100, 1, 1])

    def compute_output(self, x, y):
        return x / y.reshape(100, 1, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(100, 1, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(100, 1, 1), axis=(1, 2))


class TestElementwiseDivOpBroadcast1(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 100, 4]
        self.y_shape = [100]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 100, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 100, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 100, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 100, 1), axis=(0, 2))


class TestElementwiseDivOpBroadcast2(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [100]
        self.python_api = broadcast_wrapper(shape=[1, 1, 100])

    def compute_output(self, x, y):
        return x / y.reshape(1, 1, 100)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 1, 100)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y.reshape(1, 1, 100), axis=(0, 1))


class TestElementwiseDivOpBroadcast3(TestElementwiseDivOpNoPrim):
    def init_shape(self):
        self.x_shape = [2, 10, 12, 5]
        self.y_shape = [10, 12]
        self.attrs = {'axis': 1}
        self.python_api = broadcast_wrapper(shape=[1, 10, 12, 1])

    def compute_output(self, x, y):
        return x / y.reshape(1, 10, 12, 1)

    def compute_gradient_x(self, grad_out, y):
        return grad_out / y.reshape(1, 10, 12, 1)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(
            -1 * grad_out * out / y.reshape(1, 10, 12, 1), axis=(0, 3)
        )


class TestElementwiseDivOpBroadcast4(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 50]
        self.y_shape = [2, 1, 50]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1)).reshape(2, 1, 50)


class TestElementwiseDivOpBroadcast5(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 4, 20]
        self.y_shape = [2, 3, 1, 20]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(2)).reshape(2, 3, 1, 20)


class TestElementwiseDivOpCommonuse1(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [2, 3, 100]
        self.y_shape = [1, 1, 100]

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(0, 1)).reshape(1, 1, 100)


class TestElementwiseDivOpCommonuse2(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [30, 3, 1, 5]
        self.y_shape = [30, 1, 4, 1]

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(2)).reshape(30, 3, 1, 5)

    def compute_gradient_y(self, grad_out, out, y):
        return np.sum(-1 * grad_out * out / y, axis=(1, 3)).reshape(
            30, 1, 4, 1
        )


class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
    def init_shape(self):
        self.x_shape = [10, 12]
        self.y_shape = [2, 3, 10, 12]
        self.attrs = {'axis': 2}

    def compute_gradient_x(self, grad_out, y):
        return np.sum(grad_out / y, axis=(0, 1))


class TestElementwiseDivOpInt(ElementwiseDivOp):
    def init_dtype(self):
        self.dtype = np.int32
        self.val_dtype = np.int32

    def gen_data(self, shape):
        return np.random.randint(1, 5, size=shape)

    def compute_output(self, x, y):
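        # With positive integer inputs, the expected result is floor division.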
        return x // y


def create_test_fp16_class(parent, max_relative_error=2e-3):
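    # Dynamically create an FP16 variant of `parent`; FP16 gradients are
    # compared against the user-defined references with a relaxed tolerance.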
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestElementwiseDivFP16Op(parent):
        def init_dtype(self):
            self.dtype = np.float16
            self.val_dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = True

        def test_check_gradient(self):
            check_list = []
            check_list.append(
                {
                    'grad': ['X', 'Y'],
                    'no_grad': None,
                    'val_grad': [self.grad_x, self.grad_y],
                }
            )
            check_list.append(
                {'grad': ['Y'], 'no_grad': set('X'), 'val_grad': [self.grad_y]}
            )
            check_list.append(
                {'grad': ['X'], 'no_grad': set('Y'), 'val_grad': [self.grad_x]}
            )
            for check_option in check_list:
                check_args = [check_option['grad'], 'Out']
                check_kwargs = {
                    'no_grad_set': check_option['no_grad'],
                    'user_defined_grads': check_option['val_grad'],
                    'user_defined_grad_outputs': [self.grad_out],
                    'check_dygraph': self.check_dygraph,
                    'max_relative_error': max_relative_error,
                }
                if self.place is None:
                    self.check_grad(*check_args, **check_kwargs)
                else:
                    check_args.insert(0, self.place)
                    self.check_grad_with_place(*check_args, **check_kwargs)

    cls_name = "{}_{}".format(parent.__name__, "Fp16")
    TestElementwiseDivFP16Op.__name__ = cls_name
    globals()[cls_name] = TestElementwiseDivFP16Op


create_test_fp16_class(ElementwiseDivOp)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim1)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim2)
create_test_fp16_class(TestElementwiseDivOp_ZeroDim3)
create_test_fp16_class(TestElementwiseDivOpScalar)
create_test_fp16_class(TestElementwiseDivOpVector)
create_test_fp16_class(TestElementwiseDivOpBroadcast0)
create_test_fp16_class(TestElementwiseDivOpBroadcast1)
create_test_fp16_class(TestElementwiseDivOpBroadcast2)
create_test_fp16_class(TestElementwiseDivOpBroadcast3)
create_test_fp16_class(TestElementwiseDivOpBroadcast4)
create_test_fp16_class(TestElementwiseDivOpBroadcast5)
create_test_fp16_class(TestElementwiseDivOpCommonuse1)
create_test_fp16_class(TestElementwiseDivOpCommonuse2)
create_test_fp16_class(TestElementwiseDivOpXsizeLessThanYsize)


class TestElementwiseDivBroadcast(unittest.TestCase):
    def test_shape_with_batch_sizes(self):
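        # Build a static program where x has dynamic (None) dimensions and
        # check that scalar / tensor broadcasting still yields the full shape.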
        with fluid.program_guard(fluid.Program()):
            x_var = paddle.static.data(
                name='x', dtype='float32', shape=[None, 3, None, None]
            )
            one = 2.0
            out = one / x_var
            exe = fluid.Executor(fluid.CPUPlace())
            x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
            (out_result,) = exe.run(feed={'x': x}, fetch_list=[out])
            self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
            y = paddle.static.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2.0, 0.6, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestComplexElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.python_api = paddle.divide
        self.init_base_dtype()
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
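        # For complex division, grad_x = grad_out / conj(y) and
        # grad_y = -grad_out * conj(x / y**2) = -grad_out * conj(out / y).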
        self.grad_x = self.grad_out / np.conj(self.y)
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
        )


class TestRealComplexElementwiseDivOp(TestComplexElementwiseDivOp):
    def init_input_output(self):
        self.x = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.y = np.random.random((2, 3, 4, 5)).astype(
            self.dtype
        ) + 1j * np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.out = self.x / self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1j * np.ones(
            (2, 3, 4, 5), self.dtype
        )
        self.grad_x = np.real(self.grad_out / np.conj(self.y))
        self.grad_y = -self.grad_out * np.conj(self.x / self.y / self.y)


class TestElementwiseDivop(unittest.TestCase):
    def test_dygraph_div(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)
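        # Nudge near-zero entries away from zero to keep the division stable.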
        np_a[np.abs(np_a) < 0.0005] = 0.002
        np_b[np.abs(np_b) < 0.0005] = 0.002

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray / tensor
        expect_out = np_a / np_b
        actual_out = np_a / tensor_b
        np.testing.assert_allclose(actual_out, expect_out)

        # normal case: tensor / nparray
        actual_out = tensor_a / np_b
        np.testing.assert_allclose(actual_out, expect_out)

        paddle.enable_static()


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()