#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
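
"""Unit tests for the concat op: forward and gradient checks, AxisTensor input,
FP16/BF16 variants, error cases, and double/triple gradient checks."""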

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope

import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.tests.unittests.op_test import (
    OpTest,
    convert_float_to_uint16,
    skip_check_grad_ci,
)


class TestConcatOp(OpTest):
    def setUp(self):
        self.op_type = "concat"
        self.python_api = paddle.concat
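        # Also exercise the composite (prim) backward implementation; CINN
        # compilation stays disabled for this case.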
        self.prim_op_type = "prim"
        self.enable_cinn = False
        self.dtype = self.get_dtype()
        self.init_test_data()
        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
        self.attrs = {'axis': self.axis}
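        # Mirror the op's handling of a negative axis: count from the last
        # dimension and clamp at 0.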
        if self.axis < 0:
            self.actual_axis = self.axis + len(self.x0.shape)
            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
        else:
            self.actual_axis = self.axis

        self.outputs = {
            'Out': np.concatenate(
                (self.x0, self.x1, self.x2), axis=self.actual_axis
            )
        }

    def get_dtype(self):
        return "float64"

    def test_check_output(self):
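        # bfloat16 results are stored as uint16 and can only be checked on a
        # CUDA place.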
        if self.dtype == np.uint16:
            place = core.CUDAPlace(0)
            self.check_output_with_place(place)
        else:
            self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.uint16:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['x0'], 'Out', check_prim=True)
            self.check_grad_with_place(place, ['x1'], 'Out', check_prim=True)
            self.check_grad_with_place(place, ['x2'], 'Out', check_prim=True)
        else:
            self.check_grad(['x0'], 'Out', check_eager=True, check_prim=True)
            self.check_grad(['x1'], 'Out', check_eager=True, check_prim=True)
            self.check_grad(['x2'], 'Out', check_eager=True, check_prim=True)

    def init_test_data(self):
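        # bfloat16 inputs are drawn as float32 and then converted to the
        # uint16 storage format the kernel expects.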
        if self.dtype == np.uint16:
            x0 = np.random.random((5, 1, 4, 5)).astype(np.float32)
            self.x0 = convert_float_to_uint16(x0)
            x1 = np.random.random((5, 2, 4, 5)).astype(np.float32)
            self.x1 = convert_float_to_uint16(x1)
            x2 = np.random.random((5, 3, 4, 5)).astype(np.float32)
            self.x2 = convert_float_to_uint16(x2)
        else:
            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
        self.axis = 1


class TestConcatOp2(TestConcatOp):
    def init_test_data(self):
        self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.axis = 1


@skip_check_grad_ci(
    reason="The function 'check_grad' for large inputs is too slow."
)
class TestConcatOp3(TestConcatOp):
    def init_test_data(self):
        self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
        self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
        self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
        self.axis = 1

    def test_check_grad(self):
        pass


@skip_check_grad_ci(
    reason="This test will raise a fetch error when there is a null grad. The detailed information is in PR#17015."
)
class TestConcatOp4(TestConcatOp):
    def init_test_data(self):
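        # x2 is empty along the concat axis to exercise zero-size inputs.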
        self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
        self.axis = 0

    def test_check_grad(self):
        pass


class TestConcatOp5(TestConcatOp):
    def init_test_data(self):
        self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
        self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
        self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
        self.axis = -3


class TestConcatOp6(TestConcatOp):
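    """Concat inputs that carry LoD information on both X and Out."""
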
    def setUp(self):
        self.op_type = "concat"
        self.dtype = self.get_dtype()
        self.python_api = paddle.concat
        self.prim_op_type = "prim"
        self.enable_cinn = False
        self.init_test_data()
        self.lod = [[20, 80]]
        self.out_lod = [[20, 80, 20, 80, 20, 80]]
        self.inputs = {
            'X': [
                ('x0', (self.x0, self.lod)),
                ('x1', (self.x1, self.lod)),
                ('x2', (self.x2, self.lod)),
            ]
        }
        self.attrs = {'axis': self.axis}
        if self.axis < 0:
            self.actual_axis = self.axis + len(self.x0.shape)
            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
        else:
            self.actual_axis = self.axis
        out = np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
        self.outputs = {'Out': (out, self.out_lod)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out', check_eager=True)
        self.check_grad(['x1'], 'Out', check_eager=True)
        self.check_grad(['x2'], 'Out', check_eager=True)

    def init_test_data(self):
        self.x0 = np.random.random([100]).astype(self.dtype)
        self.x1 = np.random.random([100]).astype(self.dtype)
        self.x2 = np.random.random([100]).astype(self.dtype)
        self.axis = 0


class TestConcatOp7(TestConcatOp):
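    """Same as TestConcatOp, but with CINN compilation enabled."""
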
    def setUp(self):
        self.op_type = "concat"
        self.python_api = paddle.concat
        self.prim_op_type = "prim"
        self.enable_cinn = True
        self.dtype = self.get_dtype()
        self.init_test_data()
        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
        self.attrs = {'axis': self.axis}
        if self.axis < 0:
            self.actual_axis = self.axis + len(self.x0.shape)
            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
        else:
            self.actual_axis = self.axis

        self.outputs = {
            'Out': np.concatenate(
                (self.x0, self.x1, self.x2), axis=self.actual_axis
            )
        }

    def get_dtype(self):
        return "float64"

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out', check_eager=True, check_prim=True)
        self.check_grad(['x1'], 'Out', check_eager=True, check_prim=True)
        self.check_grad(['x2'], 'Out', check_eager=True, check_prim=True)

    def init_test_data(self):
        if self.dtype == np.uint16:
            x0 = np.random.random((5, 1, 4, 5)).astype(np.float32)
            self.x0 = convert_float_to_uint16(x0)
            x1 = np.random.random((5, 2, 4, 5)).astype(np.float32)
            self.x1 = convert_float_to_uint16(x1)
            x2 = np.random.random((5, 3, 4, 5)).astype(np.float32)
            self.x2 = convert_float_to_uint16(x2)
        else:
            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
        self.axis = 1


def create_test_AxisTensor(parent):
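    # Build a variant of ``parent`` that passes the axis through the
    # AxisTensor input instead of the 'axis' attribute.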
    class TestConcatAxisTensor(parent):
        def setUp(self):
            self.op_type = "concat"
            self.python_api = paddle.concat
            self.dtype = self.get_dtype()
            self.init_test_data()

            self.prim_op_type = "prim"
            self.enable_cinn = False
            self.inputs = {
                'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)],
                'AxisTensor': np.array([self.axis]).astype("int32"),
            }
            self.attrs = {}

            if self.axis < 0:
                self.actual_axis = self.axis + len(self.x0.shape)
                self.actual_axis = (
                    self.actual_axis if self.actual_axis > 0 else 0
                )
            else:
                self.actual_axis = self.axis

            self.outputs = {
                'Out': np.concatenate(
                    (self.x0, self.x1, self.x2), axis=self.actual_axis
                )
            }

    cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor")
    TestConcatAxisTensor.__name__ = cls_name
    globals()[cls_name] = TestConcatAxisTensor


create_test_AxisTensor(TestConcatOp)
create_test_AxisTensor(TestConcatOp2)
create_test_AxisTensor(TestConcatOp3)
create_test_AxisTensor(TestConcatOp4)
create_test_AxisTensor(TestConcatOp5)
create_test_AxisTensor(TestConcatOp6)


# ----------------Concat Fp16----------------


def create_test_fp16(parent):
    class TestConcatFp16(parent):
        def get_dtype(self):
            return np.float16

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestConcatFp16.__name__ = cls_name
    globals()[cls_name] = TestConcatFp16


create_test_fp16(TestConcatOp)
create_test_fp16(TestConcatOp2)
create_test_fp16(TestConcatOp3)
create_test_fp16(TestConcatOp4)
create_test_fp16(TestConcatOp5)
create_test_fp16(TestConcatOp6)


# ----------------Concat Bf16----------------
def create_test_bf16(parent):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestConcatBf16(parent):
        def get_dtype(self):
            return np.uint16

    cls_name = "{0}_{1}".format(parent.__name__, "Bf16")
    TestConcatBf16.__name__ = cls_name
    globals()[cls_name] = TestConcatBf16


create_test_bf16(TestConcatOp)


class TestConcatOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of concat_op should be list.
            x1 = paddle.static.data(shape=[-1, 4], dtype='int32', name='x1')
            paddle.concat(x1)

            # The item in input must be Variable.
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            x3 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.concat, [x2])
            # The input dtype of concat_op must be float16, float32, float64, int32, int64.
            x4 = paddle.static.data(shape=[-1, 4], dtype='uint8', name='x4')
            x5 = paddle.static.data(shape=[-1, 4], dtype='uint8', name='x5')
            self.assertRaises(TypeError, paddle.concat, [x4, x5])
            x6 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x6')
            x7 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x7')
            x8 = paddle.static.data(shape=[-1, 4], dtype='float32', name='x8')
            paddle.concat([x6, x7])

            # The type of axis in concat_op should be int or Variable.
            def test_axis_type():
                paddle.concat([x6, x7], 3.2)

            self.assertRaises(TypeError, test_axis_type)

            def test_input_same_dtype():
                paddle.concat([x7, x8])

            self.assertRaises(TypeError, test_input_same_dtype)

class TestConcatAPI(unittest.TestCase):
    def test_fluid_api(self):
        paddle.enable_static()
        x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
        paddle.concat([x_1, x_1], 0)

        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
        positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
        out_1 = paddle.concat([x_2, x_3], axis=1)
        out_2 = paddle.concat([x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat([x_2, x_3], axis=positive_1_int64)

        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_1, res_2, res_3] = exe.run(
            fluid.default_main_program(),
            feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
            fetch_list=[out_1, out_2, out_3],
        )
        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
    def test_api(self):
        paddle.enable_static()
        x_1 = paddle.fluid.data(
            shape=[None, 1, 4, 5], dtype='int32', name='x_1'
        )
        paddle.concat([x_1, x_1], 0)

        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
        positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
        negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
        out_1 = paddle.concat(x=[x_2, x_3], axis=1)
        out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
        out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        [res_1, res_2, res_3, res_4] = exe.run(
            paddle.static.default_main_program(),
            feed={"x_1": input_2, "x_2": input_2, "x_3": input_3},
            fetch_list=[out_1, out_2, out_3, out_4],
        )
        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_4, np.concatenate((input_2, input_3), axis=1))

    def test_imperative(self):
        in1 = np.array([[1, 2, 3], [4, 5, 6]])
        in2 = np.array([[11, 12, 13], [14, 15, 16]])
        in3 = np.array([[21, 22], [23, 24]])
        paddle.disable_static()
        x1 = paddle.to_tensor(in1)
        x2 = paddle.to_tensor(in2)
        x3 = paddle.to_tensor(in3)
        out1 = paddle.concat([x1, x2, x3], axis=-1)
        out2 = paddle.concat(x=[x1, x2], axis=0)
        np_out1 = np.concatenate([in1, in2, in3], axis=-1)
        np_out2 = np.concatenate([in1, in2], axis=0)
        paddle.enable_static()
        self.assertEqual((out1.numpy() == np_out1).all(), True)
        self.assertEqual((out2.numpy() == np_out2).all(), True)

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The item in input must be Variable.
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            x3 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace()
            )
            self.assertRaises(TypeError, paddle.concat, [x2])
            # The input dtype of concat_op must be float16, float32, float64, int32, int64.
            x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
            x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
            self.assertRaises(TypeError, paddle.concat, [x4, x5])

            # The type of axis in concat_op should be int or Variable.
            x6 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x6')
            x7 = paddle.static.data(shape=[-1, 4], dtype='float16', name='x7')
            x8 = paddle.static.data(shape=[-1, 4], dtype='float32', name='x8')

            def test_axis_type():
                paddle.concat([x6, x7], 3.2)

            self.assertRaises(TypeError, test_axis_type)

            def test_input_same_dtype():
                paddle.concat([x7, x8])

            self.assertRaises(TypeError, test_input_same_dtype)

class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
    """
    Test concat api when the input(x) is a LoDTensorArray.
    """

    def setUp(self):
        self.axis = 1
        self.python = paddle.concat
        self.iter_num = 3
        self.input_shape = [2, 3]
        self.x = np.random.random(self.input_shape).astype("float32")
        self.place = (
            fluid.CUDAPlace(0)
            if fluid.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
    def set_program(self, use_fluid_api):
        paddle.enable_static()
        if use_fluid_api:
            self.program = fluid.Program()
            with fluid.program_guard(self.program):
                input = paddle.assign(self.x)
                tensor_array = paddle.tensor.create_array(dtype='float32')
                zero = fluid.layers.fill_constant(
                    shape=[1], value=0, dtype="int64"
                )

                for i in range(self.iter_num):
                    paddle.tensor.array_write(input, zero + i, tensor_array)
                self.out_var = paddle.concat(tensor_array, axis=self.axis)
        else:
            self.program = paddle.static.Program()
            with paddle.static.program_guard(self.program):
                input = paddle.assign(self.x)
                tensor_array = paddle.tensor.create_array(
                    dtype='float32'
                )  # Api create_array is not supported in paddle 2.0 yet.
                zero = paddle.zeros(shape=[1], dtype="int64")
                for i in range(self.iter_num):
                    # Api array_write is not supported in paddle 2.0 yet.
                    paddle.tensor.array_write(input, zero + i, tensor_array)

                self.out_var = paddle.concat(tensor_array, axis=self.axis)

    def test_fluid_api(self):
        self._run_static_mode(use_fluid_api=True)
    def test_paddle_api(self):
        self._run_static_mode(use_fluid_api=False)
    def _run_static_mode(self, use_fluid_api):
        self.set_program(use_fluid_api)
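        # The array length is only known at run time, so the concat result
        # keeps a dynamic (-1) dimension along the concat axis at build time.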
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
        np.testing.assert_array_equal(
            res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis)
        )


class TestConcatDoubleGradCheck(unittest.TestCase):
    def concat_wrapper(self, x):
        return paddle.concat(x)

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be clearly specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data1 = paddle.static.data('data1', [2, 3], dtype)
        data1.persistable = True
        data2 = paddle.static.data('data2', [2, 3], dtype)
        data2.persistable = True
        out = paddle.concat([data1, data2])
        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
        gradient_checker.double_grad_check(
            [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place,
            eps=eps,
        )
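        # Repeat the second-order gradient check under the dynamic graph via
        # the python-level wrapper.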
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper,
            [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place,
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestConcatTripleGradCheck(unittest.TestCase):
    def concat_wrapper(self, x):
        return paddle.concat(x, 1)

    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be clearly specified and must not include -1.
        eps = 0.005
        dtype = np.float32

        data1 = paddle.static.data('data1', [2, 3, 4], dtype)
        data1.persistable = True
        data2 = paddle.static.data('data2', [2, 3, 4], dtype)
        data2.persistable = True
        out = paddle.concat([data1, data2], 1)
        data1_arr = np.random.uniform(-1, 1, data1.shape).astype(dtype)
        data2_arr = np.random.uniform(-1, 1, data2.shape).astype(dtype)
        gradient_checker.double_grad_check(
            [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place,
            eps=eps,
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper,
            [data1, data2],
            out,
            x_init=[data1_arr, data2_arr],
            place=place,
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    unittest.main()