#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

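# Unit tests for the elementwise_sub op: forward output checks, gradient
# checks (including prim/composite gradients), broadcasting, 0-D tensors,
# bfloat16 and complex dtypes, and the paddle.subtract Python API.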
import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci

import paddle
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper


class TestElementwiseOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
            'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()
        self.if_skip_cinn()

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', check_prim=self.check_prim)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            max_relative_error=0.005,
            no_grad_set=set("X"),
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.005,
            no_grad_set=set('Y'),
            check_prim=self.check_prim,
        )

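    # Hooks overridden by subclasses to toggle prim (composite op) gradient
    # checking and CINN compilation for a particular case.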
    def if_check_prim(self):
        self.check_prim = True

    def if_skip_cinn(self):
        pass


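# 0-D tensor cases: np.random.uniform(0.1, 1, []) yields a scalar (shape [])
# array; these cases disable CINN via enable_cinn = False.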
class TestElementwiseSubOp_ZeroDim1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, []).astype("float64"),
            'Y': np.random.uniform(0.1, 1, []).astype("float64"),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()
        self.if_skip_cinn()

    def if_check_prim(self):
        self.check_prim = True

    def if_skip_cinn(self):
        self.enable_cinn = False


class TestElementwiseSubOp_ZeroDim2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
            'Y': np.random.uniform(0.1, 1, []).astype("float64"),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()
        self.if_skip_cinn()

    def if_check_prim(self):
        self.check_prim = True

    def if_skip_cinn(self):
        self.enable_cinn = False


class TestElementwiseSubOp_ZeroDim3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, []).astype("float64"),
            'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()
        self.if_skip_cinn()

    def if_check_prim(self):
        self.check_prim = True

    def if_skip_cinn(self):
        self.enable_cinn = False


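# bfloat16 values are carried as uint16 arrays; float32 data is converted
# with convert_float_to_uint16 before being fed to the op.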
class TestBF16ElementwiseOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.dtype = np.uint16
        x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
        out = x - y

        self.inputs = {
            'X': convert_float_to_uint16(x),
            'Y': convert_float_to_uint16(y),
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.if_check_prim()
        self.if_skip_cinn()

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', check_prim=self.check_prim)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', no_grad_set=set("X"), check_prim=self.check_prim
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', no_grad_set=set('Y'), check_prim=self.check_prim
        )

    def if_check_prim(self):
        self.check_prim = True

    def if_skip_cinn(self):
        self.enable_cinn = False


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast."
)
class TestElementwiseSubOp_scalar(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(10, 3, 4).astype(np.float64),
            'Y': np.random.rand(1).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()


class TestElementwiseSubOp_Vector(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.random((100,)).astype("float64"),
            'Y': np.random.random((100,)).astype("float64"),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()


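# The broadcast_* cases subtract a lower-rank (or partially-sized) Y from X;
# most set the legacy 'axis' attr to align Y, and the expected outputs
# reshape Y by hand.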
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.inputs = {
            'X': np.random.rand(100, 3, 2).astype(np.float64),
            'Y': np.random.rand(100).astype(np.float64),
        }

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
        }

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            max_relative_error=0.005,
            no_grad_set=set("X"),
            check_dygraph=False,
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.005,
            no_grad_set=set('Y'),
            check_dygraph=False,
        )


class TestElementwiseSubOp_broadcast_1(TestElementwiseSubOp_broadcast_0):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.inputs = {
            'X': np.random.rand(2, 100, 3).astype(np.float64),
            'Y': np.random.rand(100).astype(np.float64),
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
        }


class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(2, 3, 100).astype(np.float64),
            'Y': np.random.rand(100).astype(np.float64),
        }

        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
        }
        self.if_check_prim()

    def if_check_prim(self):
        self.check_prim = True


class TestElementwiseSubOp_broadcast_3(TestElementwiseSubOp_broadcast_0):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.inputs = {
            'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
            'Y': np.random.rand(10, 12).astype(np.float64),
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
        }


class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(2, 5, 3, 12).astype(np.float64),
            'Y': np.random.rand(2, 5, 1, 12).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()

    def if_check_prim(self):
        self.check_prim = True


class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(2, 3, 100).astype(np.float64),
            'Y': np.random.rand(1, 1, 100).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()

    def if_check_prim(self):
        self.check_prim = True


class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(10, 3, 1, 4).astype(np.float64),
            'Y': np.random.rand(10, 1, 12, 1).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
        self.if_check_prim()

    def if_check_prim(self):
        self.check_prim = True


class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.inputs = {
            'X': np.random.rand(10, 12).astype(np.float64),
            'Y': np.random.rand(2, 3, 10, 12).astype(np.float64),
        }
        self.attrs = {'axis': 2}

        self.outputs = {
            'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
        }
        self.if_check_prim()

    def if_check_prim(self):
        self.check_prim = True


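# For complex subtraction the analytic gradients are supplied explicitly:
# grad_x = grad_out and grad_y = -grad_out.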
class TestComplexElementwiseSubOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.python_api = paddle.subtract
        self.prim_op_type = "prim"
        self.dtype = np.float64
        self.shape = (2, 3, 4, 5)
        self.init_input_output()
        self.init_grad_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
        }
        self.attrs = {'axis': -1, 'use_mkldnn': False}
        self.outputs = {'Out': self.out}
        self.if_check_prim()
        self.if_skip_cinn()

    def init_base_dtype(self):
        self.dtype = np.float64

    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x - self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = self.grad_out
        self.grad_y = -self.grad_out

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['X', 'Y'],
            'Out',
            user_defined_grads=[self.grad_x, self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'],
            'Out',
            no_grad_set=set("X"),
            user_defined_grads=[self.grad_y],
            user_defined_grad_outputs=[self.grad_out],
            check_prim=self.check_prim,
        )

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'],
            'Out',
            no_grad_set=set('Y'),
            user_defined_grads=[self.grad_x],
            user_defined_grad_outputs=[self.grad_out],
            check_prim=self.check_prim,
        )

    def if_skip_cinn(self):
        self.enable_cinn = False

    def if_check_prim(self):
        self.check_prim = True


class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
    def init_input_output(self):
        self.x = np.random.random(self.shape).astype(self.dtype)
        self.y = np.random.random(self.shape).astype(
            self.dtype
        ) + 1j * np.random.random(self.shape).astype(self.dtype)
        self.out = self.x - self.y

    def init_grad_input_output(self):
        self.grad_out = np.ones(self.shape, self.dtype) + 1j * np.ones(
            self.shape, self.dtype
        )
        self.grad_x = np.real(self.grad_out)
        self.grad_y = -self.grad_out

    def if_skip_cinn(self):
        self.enable_cinn = False

    def if_check_prim(self):
        self.check_prim = False


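# API-level tests: static graph (fluid.data + Executor), dygraph, and the
# in-place subtract_ variant.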
class TestSubtractApi(unittest.TestCase):
    def _executed_api(self, x, y, name=None):
        return paddle.subtract(x, y, name)

    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = self._executed_api(x, y, name='subtract_res')
            self.assertEqual(('subtract_res' in y_1.name), True)

    def test_declarative(self):
        with fluid.program_guard(fluid.Program()):

            def gen_data():
                return {
                    "x": np.array([2, 3, 4]).astype('float32'),
                    "y": np.array([1, 5, 2]).astype('float32'),
                }

            x = fluid.data(name="x", shape=[3], dtype='float32')
            y = fluid.data(name="y", shape=[3], dtype='float32')
            z = self._executed_api(x, y)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
            z_expected = np.array([1.0, -2.0, 2.0])
            self.assertEqual((z_value == z_expected).all(), True)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
            z = self._executed_api(x, y)
            np_z = z.numpy()
494
            z_expected = np.array([1.0, -2.0, 2.0])
            self.assertEqual((np_z == z_expected).all(), True)


class TestSubtractInplaceApi(TestSubtractApi):
    def _executed_api(self, x, y, name=None):
        return x.subtract_(y, name)


class TestSubtractInplaceBroadcastSuccess(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 4).astype('float')
        self.y_numpy = np.random.rand(3, 4).astype('float')

    def test_broadcast_success(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)
        inplace_result = x.subtract_(y)
        numpy_result = self.x_numpy - self.y_numpy
        self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
        paddle.enable_static()


class TestSubtractInplaceBroadcastSuccess2(TestSubtractInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
        self.y_numpy = np.random.rand(3, 1).astype('float')


class TestSubtractInplaceBroadcastSuccess3(TestSubtractInplaceBroadcastSuccess):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
        self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')


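# In-place subtract_ cannot broadcast x up to a larger shape, so these
# shape pairs must raise ValueError.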
class TestSubtractInplaceBroadcastError(unittest.TestCase):
    def init_data(self):
        self.x_numpy = np.random.rand(3, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')

    def test_broadcast_errors(self):
        paddle.disable_static()
        self.init_data()
        x = paddle.to_tensor(self.x_numpy)
        y = paddle.to_tensor(self.y_numpy)

        def broadcast_shape_error():
            x.subtract_(y)

        self.assertRaises(ValueError, broadcast_shape_error)
        paddle.enable_static()


class TestSubtractInplaceBroadcastError2(TestSubtractInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestSubtractInplaceBroadcastError3(TestSubtractInplaceBroadcastError):
    def init_data(self):
        self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
        self.y_numpy = np.random.rand(2, 3, 4).astype('float')


class TestFloatElementwiseSubop(unittest.TestCase):
    def test_dygraph_sub(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float64)
        np_b = np.random.random((2, 3, 4)).astype(np.float64)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: tensor - tensor
        expect_out = np_a - np_b
        actual_out = tensor_a - tensor_b
        np.testing.assert_allclose(
            actual_out, expect_out, rtol=1e-07, atol=1e-07
        )

        # normal case: tensor - scalar
        expect_out = np_a - 1
        actual_out = tensor_a - 1
        np.testing.assert_allclose(
            actual_out, expect_out, rtol=1e-07, atol=1e-07
        )

        # normal case: scalar - tensor
        expect_out = 1 - np_a
        actual_out = 1 - tensor_a
        np.testing.assert_allclose(
            actual_out, expect_out, rtol=1e-07, atol=1e-07
        )

        paddle.enable_static()


class TestFloatElementwiseSubop1(unittest.TestCase):
    def test_dygraph_sub(self):
        paddle.disable_static()

        np_a = np.random.random((2, 3, 4)).astype(np.float32)
        np_b = np.random.random((2, 3, 4)).astype(np.float32)

        tensor_a = paddle.to_tensor(np_a, dtype="float32")
        tensor_b = paddle.to_tensor(np_b, dtype="float32")

        # normal case: nparray - tensor
        expect_out = np_a - np_b
        actual_out = np_a - tensor_b
        np.testing.assert_allclose(
            actual_out, expect_out, rtol=1e-07, atol=1e-07
        )

        # normal case: tensor - nparray
        actual_out = tensor_a - np_b
        np.testing.assert_allclose(
            actual_out, expect_out, rtol=1e-07, atol=1e-07
        )

        paddle.enable_static()


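# With FLAGS_print_extra_attrs=1, append_op warns when an extra attr differs
# from its default (here axis=1 instead of -1); the test asserts the message.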
class TestTensorSubAPIWarnings(unittest.TestCase):
    def test_warnings(self):

        with warnings.catch_warnings(record=True) as context:
            warnings.simplefilter("always")

            paddle.enable_static()
            helper = LayerHelper("elementwise_sub")
            data = paddle.static.data(
                name='data', shape=[None, 3, 32, 32], dtype='float32'
            )
            out = helper.create_variable_for_type_inference(dtype=data.dtype)
            os.environ['FLAGS_print_extra_attrs'] = "1"
            helper.append_op(
                type="elementwise_sub",
                inputs={'X': data, 'Y': data},
                outputs={'Out': out},
                attrs={'axis': 1, 'use_mkldnn': False},
            )
            self.assertTrue(
                "op elementwise_sub's attr axis = 1 is not the default value: -1"
                in str(context[-1].message)
            )
            os.environ['FLAGS_print_extra_attrs'] = "0"


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()