#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import unittest

import numpy as np

import paddle


class TestInplace(unittest.TestCase):
    def test_forward_version(self):
        with paddle.fluid.dygraph.guard():
            var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
            self.assertEqual(var.inplace_version, 0)

            var[0] = 1.1
            self.assertEqual(var.inplace_version, 1)

            paddle.assign(paddle.ones(shape=[3]), var)

            # NOTE(liym27): assign(input, output) is an inplace operation for output.
            # Because api assign has inplace-related processing, var.inplace_version should be 2, not 1.
            self.assertEqual(var.inplace_version, 2)

            var[2] = 3
            self.assertEqual(var.inplace_version, 3)
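
    # A minimal extra sketch, using only APIs already exercised in this file:
    # an inplace elementwise op such as tanh_ bumps inplace_version by one,
    # just like setitem above.
    def test_forward_version_inplace_api(self):
        with paddle.fluid.dygraph.guard():
            var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
            self.assertEqual(var.inplace_version, 0)

            var.tanh_()  # inplace elementwise op: version 0 -> 1
            self.assertEqual(var.inplace_version, 1)

            var[0] = 0.5  # setitem is inplace as well: version 1 -> 2
            self.assertEqual(var.inplace_version, 2)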

    def test_backward_error(self):
        # It raises an error because the inplace operator will result
        # in incorrect gradient computation.
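        # Mechanism, as suggested by the error message asserted below: when var_c
        # is computed, autograd snapshots var_b's version (0); the later setitem
        # bumps the tensor's version to 1, and backward flags the mismatch.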
        with paddle.fluid.dygraph.guard():
            var_a = paddle.ones(shape=[4, 2, 3], dtype="float32")
            var_a.stop_gradient = False

            var_b = var_a**2

            # Here, the gradient computation will use the value of var_b
            var_c = var_b**2
            var_b[1:2] = 3.3  # var_b is modified inplace after using it

            var_d = var_b**2

            loss = paddle.nn.functional.relu(var_c + var_d)
            with self.assertRaisesRegex(
                RuntimeError,
                "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                    1, 0
                ),
            ):
                loss.backward()

    def test_backward_success_1(self):
        # var_b is modified inplace before it is used, so the inplace operation
        # doesn't result in incorrect gradient computation.
        with paddle.fluid.dygraph.guard():
            var_a = paddle.ones(shape=[4, 2, 3], dtype="float32")
            var_a.stop_gradient = False

            var_b = var_a**2
            var_b[1:2] = 3  # var_b is modified inplace before using it

            # Here, the gradient computation will use the value of var_b
            var_c = var_b**2
            loss = var_c.sum()
            loss.backward()

    def test_backward_success_2(self):
        # Although var_b is modified inplace after it is used, its value is not needed in gradient computation.
        # The inplace operator doesn't result in incorrect gradient computation.
        with paddle.fluid.dygraph.guard():
            var_a = paddle.ones(shape=[4, 2, 3], dtype="float32")
            var_a.stop_gradient = False

            var_b = var_a**2

            var_b[1:2] = 3  # var_b is modified inplace before using it

            var_c = (
                var_b + var_b
            )  # Here, the grad op of add doesn't use the value of var_b
            loss = var_c.sum()

            var_b[1:2] = 3  # var_b is modified inplace after using it

            loss.backward()


class TestDygraphInplace(unittest.TestCase):
    def setUp(self):
        self.init_data()
        self.set_np_compare_func()

    def init_data(self):
        self.input_var_numpy = np.random.uniform(-5, 5, [10, 20, 1])
        self.dtype = "float32"

    def set_np_compare_func(self):
        self.np_compare = np.array_equal

    def non_inplace_api_processing(self, var):
        return paddle.squeeze(var)

    def inplace_api_processing(self, var):
        return paddle.squeeze_(var)
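
    # Subclasses override only the two hooks above; every test in this class
    # then exercises the chosen out-of-place / inplace API pair.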

    def test_inplace_api(self):
        var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
        inplace_var = self.inplace_api_processing(var)
        self.assertTrue(id(var) == id(inplace_var))

        inplace_var[0] = 2.0
        np.testing.assert_array_equal(var.numpy(), inplace_var.numpy())
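        # var and inplace_var are the same Tensor object, so the setitem above
        # is visible through both names.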

    def test_forward_result(self):
        var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
        no_inplace_var = self.non_inplace_api_processing(var)
        inplace_var = self.inplace_api_processing(var)
        np.testing.assert_array_equal(
            no_inplace_var.numpy(), inplace_var.numpy()
        )

    def test_forward_version(self):
        with paddle.fluid.dygraph.guard():
            var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            self.assertEqual(var.inplace_version, 0)

            inplace_var = self.inplace_api_processing(var)
            self.assertEqual(var.inplace_version, 1)

            inplace_var[0] = 2.0
            self.assertEqual(var.inplace_version, 2)

            inplace_var = self.inplace_api_processing(inplace_var)
            self.assertEqual(var.inplace_version, 3)

    def test_leaf_inplace_var_error(self):
        with paddle.fluid.dygraph.guard():
            var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var.stop_gradient = False

            def leaf_inplace_error():
                self.inplace_api_processing(var)

            self.assertRaises(ValueError, leaf_inplace_error)
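            # Inplace ops are rejected on a leaf Tensor with stop_gradient=False,
            # presumably because overwriting a leaf in place would invalidate
            # values autograd still needs.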

    def test_backward_error(self):
        # It raises an error because the inplace operator will result
        # in incorrect gradient computation.
        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2

            # Here, the gradient computation will use the value of var_b
            var_c = var_b**2
            self.inplace_api_processing(var_b)

            loss = paddle.nn.functional.relu(var_c)
            with self.assertRaisesRegex(
                RuntimeError,
                "received tensor_version:{} != wrapper_version_snapshot:{}".format(
                    1, 0
                ),
            ):
                loss.backward()

    def test_backward_success_1(self):
        # var_b is modified inplace before it is used, so the inplace operation
        # doesn't result in incorrect gradient computation.
        grad_var_a, grad_var_a_inplace = 0, 1
        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2
            var_c = self.inplace_api_processing(
                var_b
            )  # var_b is modified inplace before using it

            # Here, the gradient computation will use the value of var_b
            var_d = var_c**2
            loss = var_d.sum()
            loss.backward()
            grad_var_a_inplace = var_a.grad.numpy()

        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2
            var_c = self.non_inplace_api_processing(var_b)
            var_d = var_c**2
            loss = var_d.sum()
            loss.backward()
            grad_var_a = var_a.grad.numpy()

        self.assertTrue(self.np_compare(grad_var_a_inplace, grad_var_a))

    def test_backward_success_2(self):
        # Although var_b is modified inplace after it is used, its value is not needed in gradient computation.
        # The inplace operator doesn't result in incorrect gradient computation.
        grad_var_a, grad_var_a_inplace = 0, 1
        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2

            var_c = self.inplace_api_processing(
                var_b
            )  # var_b is modified inplace before using it

            var_d = (
                var_c + var_c
            )  # Here, the grad op of add doesn't use the value of var_b
            loss = var_d.sum()

            loss.backward()
            grad_var_a_inplace = var_a.grad.numpy()

        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2

            var_c = self.non_inplace_api_processing(var_b)

            var_d = (
                var_c + var_c
            )  # Here, the grad op of add doesn't use the value of var_b
            loss = var_d.sum()

            loss.backward()
            grad_var_a = var_a.grad.numpy()
        np.testing.assert_array_equal(grad_var_a_inplace, grad_var_a)


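# TestDygraphInplace acts as a template: each inplace API below is covered by
# subclassing it and overriding only the two hooks. A minimal hypothetical
# sketch (the exp / exp_ pair is exercised by a real class further below):
#
#     class TestDygraphInplaceExpSketch(TestDygraphInplace):
#         def non_inplace_api_processing(self, var):
#             return var.exp()
#
#         def inplace_api_processing(self, var):
#             return var.exp_()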
class TestDygraphInplaceWithContinuous(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.random.uniform(-5, 5, [10, 20, 1])
        self.dtype = "float32"

    def set_np_compare_func(self):
        np_array_equal_with_nan = functools.partial(
            np.array_equal, equal_nan=True
        )
        self.np_compare = np_array_equal_with_nan
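        # equal_nan=True: with chained transcendental ops, the inplace and
        # out-of-place paths may both produce NaN at the same positions, and
        # those positions should still compare as equal.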

    def non_inplace_api_processing(self, var):
        return paddle.sin(var)

    def inplace_api_processing(self, var):
        return paddle.sin_(var)

    def test_continuous_inplace_backward(self):
        # An api that relies only on its input to calculate the gradient will copy the input
        # before the inplace calculation, so continuous inplace backward calculation is supported here.
        grad_var_a, grad_var_a_inplace = 0, 1
        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2
            var_c = self.inplace_api_processing(var_b)
            var_d = self.inplace_api_processing(var_c)
            loss = var_d.sum()
            loss.backward()
            grad_var_a_inplace = var_a.grad.numpy()

        with paddle.fluid.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2
            var_c = self.non_inplace_api_processing(var_b)
            var_d = self.non_inplace_api_processing(var_c)
            loss = var_d.sum()
            loss.backward()
            grad_var_a = var_a.grad.numpy()

        self.assertTrue(self.np_compare(grad_var_a_inplace, grad_var_a))


class TestDygraphInplaceUnsqueeze(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.unsqueeze(var, -1)

    def inplace_api_processing(self, var):
        return paddle.unsqueeze_(var, -1)


class TestDygraphInplaceReshape(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.reshape(var, [-1])

    def inplace_api_processing(self, var):
        return paddle.reshape_(var, [-1])


class TestDygraphInplaceReshapeTensor(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        shape = paddle.to_tensor([-1])
        return paddle.reshape(var, shape)

    def inplace_api_processing(self, var):
        shape = paddle.to_tensor([-1])
        return paddle.reshape_(var, shape)


class TestDygraphInplaceFlatten(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.flatten()

    def inplace_api_processing(self, var):
        return var.flatten_()


class TestDygraphInplaceScatter(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.array([[1, 1], [2, 2], [3, 3]])
        self.dtype = "float32"

    def non_inplace_api_processing(self, var):
        index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
        updates = paddle.to_tensor(
            [[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32'
        )

        return paddle.scatter(var, index, updates, overwrite=False)

    def inplace_api_processing(self, var):
        index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
        updates = paddle.to_tensor(
            [[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32'
        )

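        # overwrite=False selects accumulate mode: a row hit by duplicate
        # indices (index 1 appears twice here) receives the sum of its updates.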
        return paddle.scatter_(var, index, updates, overwrite=False)


class TestDygraphInplaceElu(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.nn.functional.elu(var)

    def inplace_api_processing(self, var):
        return paddle.nn.functional.elu_(var)


class TestDygraphInplaceRelu(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.nn.functional.relu(var)

    def inplace_api_processing(self, var):
        return paddle.nn.functional.relu_(var)


class TestDygraphInplaceSoftmax(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.nn.functional.softmax(var)

    def inplace_api_processing(self, var):
        return paddle.nn.functional.softmax_(var)


class TestDygraphInplaceTanh(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return paddle.tanh(var)

    def inplace_api_processing(self, var):
        return paddle.tanh_(var)


class TestDygraphInplaceCeil(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.ceil()

    def inplace_api_processing(self, var):
        return var.ceil_()


class TestDygraphInplaceFloor(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.floor()

    def inplace_api_processing(self, var):
        return var.floor_()


class TestDygraphInplaceExp(TestDygraphInplace):
    def set_np_compare_func(self):
        self.np_compare = np.allclose

    def non_inplace_api_processing(self, var):
        return var.exp()

    def inplace_api_processing(self, var):
        return var.exp_()


class TestDygraphInplaceReciprocal(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.reciprocal()

    def inplace_api_processing(self, var):
        return var.reciprocal_()


class TestDygraphInplaceRound(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.round()

    def inplace_api_processing(self, var):
        return var.round_()


class TestDygraphInplaceSqrt(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.random.uniform(0, 5, [10, 20, 1])
        self.dtype = "float32"

    def non_inplace_api_processing(self, var):
        return var.sqrt()

    def inplace_api_processing(self, var):
        return var.sqrt_()


class TestDygraphInplaceRsqrt(TestDygraphInplaceSqrt):
    def non_inplace_api_processing(self, var):
        return var.rsqrt()

    def inplace_api_processing(self, var):
        return var.rsqrt_()


class TestDygraphInplaceClip(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.clip(0.6, 1.5)

    def inplace_api_processing(self, var):
        return var.clip_(0.6, 1.5)


class TestDygraphInplaceScale(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
        return var.scale(scale=2.0, bias=3.0)

    def inplace_api_processing(self, var):
        return var.scale_(scale=2.0, bias=3.0)


class TestDygraphInplaceAdd(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.random.rand(2, 3, 4)
        self.dtype = "float32"
        self.input_var_numpy_2 = np.random.rand(2, 3, 4).astype(self.dtype)

    def non_inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.add(input_var_2)

    def inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.add_(input_var_2)


class TestDygraphInplaceSubtract(TestDygraphInplaceAdd):
    def non_inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.subtract(input_var_2)

    def inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.subtract_(input_var_2)


class TestDygraphInplaceRemainder(TestDygraphInplaceAdd):
    def non_inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.remainder(input_var_2)

    def inplace_api_processing(self, var):
        input_var_2 = paddle.to_tensor(self.input_var_numpy_2)
        return var.remainder_(input_var_2)

    def test_leaf_inplace_var_error(self):
        pass

    def test_backward_error(self):
        pass

    def test_backward_success_1(self):
        pass

    def test_backward_success_2(self):
        pass
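
    # The overrides above intentionally skip the leaf-variable and backward
    # checks for remainder_, so only the forward checks inherited from
    # TestDygraphInplaceAdd run for this op (its grad is presumably unsupported).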


class TestLossIsInplaceVar(unittest.TestCase):
    def test_loss_is_inplace_var(self):
        with paddle.fluid.dygraph.guard():
            var_a = paddle.ones((2, 2))
            var_a.stop_gradient = False

            var_b = var_a * 2
            loss = var_b.tanh_()
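            # tanh_ writes into var_b and returns the same Tensor, so the loss
            # itself is an inplace-modified variable.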

            loss.backward()
            inplace_grad_var_a = var_a.grad.numpy()

        with paddle.fluid.dygraph.guard():
            var_a = paddle.ones((2, 2))
            var_a.stop_gradient = False

            var_b = var_a * 2
            loss = var_b.tanh()

            loss.backward()
            grad_var_a = var_a.grad.numpy()

        np.testing.assert_array_equal(inplace_grad_var_a, grad_var_a)


class TestContinuouslyInplace(unittest.TestCase):
    def test_continuously_inplace(self):
        a = paddle.rand([2, 3])
        a.stop_gradient = False
        b = a * 2

        b.reshape_([-1])
        b.reshape_([2, 3])
        b.reshape_([-1])

        b.backward()
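        # Chained reshape_ calls are safe for backward here: reshape's grad
        # needs only the original shape, not the tensor's values.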


class TestGetitemBeforeInplace(unittest.TestCase):
    def test_getitem_before_inplace(self):
        a = paddle.ones(shape=[4, 2, 3], dtype="float32")
        a.stop_gradient = False
        b = a**2
        b[0] = 3
        # getitem has no_need_buffer input
        c = b[0:2]
        loss = c.sum()
        b[1] = 2
        loss.backward()
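        # Since slice (getitem) marks its input as no_need_buffer, its grad op
        # does not read b's value, so the inplace write to b after the getitem
        # does not trip the version check seen in test_backward_error.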


class TestDygraphInplaceAsin(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.asin(var)

    def inplace_api_processing(self, var):
        return paddle.asin_(var)


class TestDygraphInplaceSinh(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.sinh(var)

    def inplace_api_processing(self, var):
        return paddle.sinh_(var)


class TestDygraphInplaceAsinh(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.asinh(var)

    def inplace_api_processing(self, var):
        return paddle.asinh_(var)


class TestDygraphInplaceAbs(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.abs(var)

    def inplace_api_processing(self, var):
        return paddle.abs_(var)


class TestDygraphInplaceCos(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.cos(var)

    def inplace_api_processing(self, var):
        return paddle.cos_(var)


class TestDygraphInplaceCosh(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.cosh(var)

    def inplace_api_processing(self, var):
        return paddle.cosh_(var)


class TestDygraphInplaceAcos(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.acos(var)

    def inplace_api_processing(self, var):
        return paddle.acos_(var)


class TestDygraphInplaceAcosh(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.acosh(var)

    def inplace_api_processing(self, var):
        return paddle.acosh_(var)


class TestDygraphInplaceTan(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.tan(var)

    def inplace_api_processing(self, var):
        return paddle.tan_(var)


class TestDygraphInplaceATan(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.atan(var)

    def inplace_api_processing(self, var):
        return paddle.atan_(var)


class TestDygraphInplaceATanh(TestDygraphInplaceWithContinuous):
    def non_inplace_api_processing(self, var):
        return paddle.atanh(var)

    def inplace_api_processing(self, var):
        return paddle.atanh_(var)


class TestDygraphInplaceAddMM(TestDygraphInplaceWithContinuous):
    def init_data(self):
        self.input_var_numpy = np.random.uniform(-5, 5, [10, 10])
        self.dtype = "float32"
        self.x = paddle.randn([10, 10], dtype="float32")
        self.y = paddle.randn([10, 10], dtype="float32")

    def non_inplace_api_processing(self, var):
        return paddle.addmm(var, x=self.x, y=self.y)

    def inplace_api_processing(self, var):
        return paddle.addmm_(var, x=self.x, y=self.y)

    def test_errors(self):
        var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
        x1 = paddle.randn([10])
        self.assertRaises(ValueError, paddle.addmm_, var, x1, self.y)

        y1 = paddle.randn([12, 10])
        self.assertRaises(ValueError, paddle.addmm_, var, self.x, y1)
        x2 = paddle.randn([12, 10])
        self.assertRaises(ValueError, paddle.addmm_, var, x2, self.y)
        var1 = paddle.randn([1, 5])
        self.assertRaises(ValueError, paddle.addmm_, var1, x2, self.y)
        y2 = paddle.randn([10, 12])
        self.assertRaises(ValueError, paddle.addmm_, var, self.x, y2)
        var2 = paddle.randn([6])
        self.assertRaises(ValueError, paddle.addmm_, var2, self.x, self.y)
        var3 = paddle.randn([2, 3, 4])
        self.assertRaises(ValueError, paddle.addmm_, var3, self.x, self.y)
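        # All of the above fail shape validation: addmm_ expects 2-D x and y
        # with x.shape[1] == y.shape[0], and an input broadcastable to
        # [x.shape[0], y.shape[1]].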


class TestDygraphInplacePowerScalar(TestDygraphInplaceWithContinuous):
    def inplace_api_processing(self, var):
        return paddle.pow_(var, 2)

    def non_inplace_api_processing(self, var):
        return paddle.pow(var, 2)

    def test_type_error(self):
        var = paddle.to_tensor(self.input_var_numpy, dtype=self.dtype)
        with self.assertRaisesRegex(
            TypeError,
            'y must be scalar type, but received: %s ' % (type([2])),
        ):
            paddle.pow_(var, [2])


if __name__ == '__main__':
    unittest.main()