#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16

import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, core, program_guard


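# Basic case: split a (4, 5, 6) array along axis 1 into sections
# [2, 1, 2]. The reference comes from np.split(x, [2, 3], axis=1), which
# cuts before indices 2 and 3, i.e. the cumulative sums of the leading
# section sizes.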
class TestSplitOp(OpTest):
    def setUp(self):
        self._set_op_type()
        self.dtype = self.get_dtype()
        axis = 1
        if self.dtype == np.uint16:
            x = np.random.random((4, 5, 6)).astype(np.float32)
            out = np.split(x, [2, 3], axis)
            self.inputs = {'X': convert_float_to_uint16(x)}
            self.outputs = {
                'Out': [
                    ('out%d' % i, convert_float_to_uint16(out[i]))
                    for i in range(len(out))
                ]
            }
        else:
            x = np.random.random((4, 5, 6)).astype(self.dtype)
            out = np.split(x, [2, 3], axis)
            self.inputs = {'X': x}
            self.outputs = {
                'Out': [('out%d' % i, out[i]) for i in range(len(out))]
            }
        self.attrs = {'axis': axis, 'sections': [2, 1, 2]}

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


# test with attr(num)
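# When attr(num) is set and attr(sections) is empty, the op splits X into
# 'num' equal parts along 'axis'.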
class TestSplitOp_2(OpTest):
    def setUp(self):
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {'X': self.x}
        self.attrs = {
            'axis': self.axis,
            'sections': self.sections,
            'num': self.num,
        }

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = []
        self.num = 3
        self.indices_or_sections = 3

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


# attr(axis) is Tensor
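# The split axis is supplied at runtime through the AxisTensor input
# rather than through the static attr(axis).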
class TestSplitOp_AxisTensor(OpTest):
    def setUp(self):
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {
            'X': self.x,
            'AxisTensor': np.array([self.axis]).astype("int32"),
        }
        self.attrs = {'sections': self.sections, 'num': self.num}

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = []
        self.num = 3
        self.indices_or_sections = 3

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


# attr(sections) is list containing Tensor
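# Each section size is fed as a one-element int32 tensor through
# SectionsTensorList; attr(sections) only carries -1 placeholders that
# are resolved at runtime.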
class TestSplitOp_SectionsTensor(OpTest):
    def setUp(self):
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {'X': self.x}

        sections_tensor = []
        for index, ele in enumerate(self.sections):
            sections_tensor.append(
                ("x" + str(index), np.ones((1)).astype('int32') * ele)
            )

        self.inputs['SectionsTensorList'] = sections_tensor

        self.attrs = {
            'axis': self.axis,
            'sections': self.sections_infer,
            'num': self.num,
        }

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 1
        self.sections = [2, 1, 2]
        self.sections_infer = [-1, -1, -1]
        self.num = 0
        self.indices_or_sections = [2, 3]

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


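# One entry of attr(sections) is -1 and is inferred from the size of the
# split axis.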
class TestSplitOp_unk_section(OpTest):
    def setUp(self):
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {'X': self.x}
        self.attrs = {
            'axis': self.axis,
            'sections': self.sections,
            'num': self.num,
        }

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = [2, 1, -1]
        self.num = 0
        self.indices_or_sections = [2, 3]

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


class TestSplitByrefOp(OpTest):
    def _set_op_type(self):
        self.op_type = "split_byref"


# ----------------Split Fp16----------------
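# create_test_fp16 derives an fp16 variant from each parent OpTest case
# at import time; the fp16 gradient check is a no-op.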


def create_test_fp16(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestSplitFp16(parent):
        def get_dtype(self):
            return np.float16

        def test_check_grad(self):
            pass

    cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
    TestSplitFp16.__name__ = cls_name
    globals()[cls_name] = TestSplitFp16


create_test_fp16(TestSplitOp)

# ----------------Split Bf16----------------
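# bf16 data is represented as np.uint16 in these tests;
# convert_float_to_uint16 (imported from op_test) packs float32 values
# into that layout.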


def create_test_bf16(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestSplitBf16(parent):
        def get_dtype(self):
            return np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place)

        def test_check_grad(self):
            pass

    cls_name = "{0}_{1}".format(parent.__name__, "Bf16")
    TestSplitBf16.__name__ = cls_name
    globals()[cls_name] = TestSplitBf16


create_test_bf16(TestSplitOp)


class TestSplitAPI(unittest.TestCase):
    def test_api(self):
        input_1 = np.random.random([4, 5, 6]).astype("int32")
        positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
        positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
        positive_2_int64 = fluid.layers.fill_constant([1], "int64", 2)
        x_1 = fluid.data(shape=[4, 5, 6], dtype='int32', name='x_1')
        x_2 = fluid.data(shape=[4, 5, None], dtype='int32', name='x_2')

        out_0, out_1, out_2 = paddle.split(
            x=x_1,
            num_or_sections=[positive_2_int64, positive_1_int32, -1],
            axis=positive_1_int64,
        )

        out_3, out_4, out_5 = paddle.split(
            x=x_1, num_or_sections=[2, 1, 2], axis=positive_1_int32
        )
        paddle.split(x=x_2, num_or_sections=2, axis=2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_0, res_1, res_2, res_3, res_4, res_5] = exe.run(
            fluid.default_main_program(),
            feed={"x_1": input_1, "x_2": input_1},
            fetch_list=[out_0, out_1, out_2, out_3, out_4, out_5],
        )

        out = np.split(input_1, [2, 3], 1)
        assert np.array_equal(res_0, out[0])
        assert np.array_equal(res_1, out[1])
        assert np.array_equal(res_2, out[2])
        assert np.array_equal(res_3, out[0])
        assert np.array_equal(res_4, out[1])
        assert np.array_equal(res_5, out[2])


class TestSplitOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The type of axis in split_op should be int or Variable.
            def test_axis_type():
                x6 = fluid.layers.data(shape=[4], dtype='float16', name='x3')
                paddle.split(x=x6, num_or_sections=2, axis=3.2)

            self.assertRaises(TypeError, test_axis_type)

            # When axis is a Variable, its data type should be int32 or int64.
            def test_axis_variable_type():
                x9 = fluid.layers.data(shape=[4], dtype='float16', name='x9')
                x10 = fluid.layers.data(shape=[1], dtype='float16', name='x10')
                paddle.split(x=x9, num_or_sections=2, axis=x10)

            self.assertRaises(TypeError, test_axis_variable_type)

            # The type of num_or_sections in split_op should be int, tuple or list.
            def test_num_or_sections_type():
                x6 = fluid.layers.data(shape=[4], dtype='float16', name='x4')
                paddle.split(x=x6, num_or_sections=2.1, axis=3)

            self.assertRaises(TypeError, test_num_or_sections_type)

            def test_num_or_sections_type_tensor():
                x7 = fluid.layers.data(shape=[4], dtype='float16', name='x5')
                paddle.split(input=x7, num_or_sections=2.1, dim=3)

            self.assertRaises(TypeError, test_num_or_sections_type_tensor)

            def test_axis_type_tensor():
                x8 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
                paddle.split(input=x8, num_or_sections=2, dim=3.2)

            self.assertRaises(TypeError, test_axis_type_tensor)


class API_TestSplit(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
            data2 = fluid.layers.data('data2', shape=[1], dtype='int32')
            x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=data2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([4, 6, 6]).astype('float64')
            input2 = np.array([2]).astype('int32')
            r0, r1, r2 = exe.run(
                feed={"data1": input1, "data2": input2}, fetch_list=[x0, x1, x2]
            )
            ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, r2, rtol=1e-05)


class API_TestSplit2(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
            x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([4, 6, 6]).astype('float64')
            r0, r1, r2 = exe.run(feed={"data1": input1}, fetch_list=[x0, x1, x2])
            ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, r2, rtol=1e-05)


class API_TestSplit3(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
            x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([1, 10]).astype('float64')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, (3,), axis=1)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


class API_TestSplit4(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
            index = fluid.layers.data('index', shape=[1], dtype='int32')
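            # num_or_sections mixes a static int (3) with a tensor whose
            # value (7 here) is only known at run time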
            x0, x1 = paddle.split(data, num_or_sections=(3, index), axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([1, 10]).astype('float64')
            input2 = np.array([7]).astype('int32')
            r0, r1 = exe.run(
                feed={"data": input1, "index": input2}, fetch_list=[x0, x1]
            )
            ex_x0, ex_x1 = np.split(input1, (3,), axis=1)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


class API_TestSplit5(unittest.TestCase):
    def test_out(self):
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_1 = np.random.random([5, 4]).astype("int32")
                # input is a variable whose shape is [5, 4]
                input = paddle.to_tensor(input_1)
                n = paddle.full([1], 5, dtype='int32')
                out = paddle.split(input, [n])
                exe = paddle.static.Executor(place=place)
                re = exe.run(fetch_list=[out])
                re = re[0]
                ex_out = np.split(input_1, [5])
                ex_out = ex_out[0]
                np.testing.assert_allclose(ex_out, re, rtol=1e-05)


class API_TestSplit6(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
            x0, x1 = paddle.split(data, num_or_sections=[1, 1], axis=0)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([2, 10]).astype('float64')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, (1,), axis=0)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


class API_TestDygraphFluidSplit(unittest.TestCase):
    def test_out1(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out2(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)


class API_TestDygraphSplit(unittest.TestCase):
    def test_out1(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)

            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out2(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("bool")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out3(self):
        with fluid.dygraph.guard():
            np.random.seed(2021)
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            out_dy = paddle.split(input, [6], axis=1)
            out_dy = out_dy[0]
            out_dy_np = out_dy.numpy()
            ex_out = np.split(input_1, [6], axis=1)
            ex_out = ex_out[0]
            input = paddle.to_tensor(input_1)
            out_eager = paddle.split(input, [6], axis=1)
            out_eager = out_eager[0]
            out_eager_np = out_eager.numpy()
            np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
        np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)

    def test_out_tensor_input(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            num1 = paddle.full(shape=[1], fill_value=2, dtype='int32')
            x0, x1, x2 = paddle.split(
                input, num_or_sections=[num1, 2, 2], axis=1
            )
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_axis_tensor_input(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
592
            num1 = paddle.full(shape=[1], fill_value=1, dtype='int32')
593 594 595
            x0, x1, x2 = paddle.split(
                input, num_or_sections=[2, 2, 2], axis=num1
            )
596 597 598 599
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
600 601 602
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_negative_one_section(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a variable whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            num1 = paddle.full(shape=[1], fill_value=1, dtype='int32')
            x0 = paddle.split(input, num_or_sections=[-1], axis=num1)
            x0_out = x0[0].numpy()
        np.testing.assert_array_equal(x0_out, input.numpy())


class API_TestEmptySplit(unittest.TestCase):
    def test_axis_input_empty_section(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([8, 6, 6]).astype("float32")
            # input is a variable whose shape is [8, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=[5, 0, 3])
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
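            # sections [5, 0, 3] map to np.split indices [5, 5]; the
            # middle piece is empty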
            ex_x0, ex_x1, ex_x2 = np.split(input_1, [5, 5])
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()