#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
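
# This suite checks paddle.split against numpy.split as the reference
# implementation, in both static and dynamic graph modes. A minimal sketch of
# the correspondence under test (illustrative only, not used by the tests):
#
#     x = np.random.random((4, 5, 6)).astype('float64')
#     expected = np.split(x, [2, 3], axis=1)  # cut before indices 2 and 3
#     got = paddle.split(paddle.to_tensor(x), num_or_sections=[2, 1, 2], axis=1)
#     for e, g in zip(expected, got):
#         np.testing.assert_allclose(e, g.numpy())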


class TestSplitOp(OpTest):
    def setUp(self):
        self.python_api = paddle.split
        self.public_python_api = paddle.split
        self.python_out_sig = ['out0', 'out1', 'out2']
        self._set_op_type()
        self.prim_op_type = "prim"
        self.dtype = self.get_dtype()
        axis = 1
        if self.dtype == np.uint16:
            self.enable_cinn = False
            x = np.random.random((4, 5, 6)).astype(np.float32)
            out = np.split(x, [2, 3], axis)
            self.inputs = {'X': convert_float_to_uint16(x)}
            self.outputs = {
                'Out': [
                    ('out%d' % i, convert_float_to_uint16(out[i]))
                    for i in range(len(out))
                ]
            }
        else:
            x = np.random.random((4, 5, 6)).astype(self.dtype)
            out = np.split(x, [2, 3], axis)
            self.inputs = {'X': x}
            self.outputs = {
                'Out': [('out%d' % i, out[i]) for i in range(len(out))]
            }
        self.attrs = {'axis': axis, 'sections': [2, 1, 2]}

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'], check_prim=True)


# test with attr(num)
class TestSplitWithNumOp(OpTest):
    def setUp(self):
        self.python_api = paddle.split
        self.public_python_api = paddle.split
        self.python_out_sig = ['out0', 'out1', 'out2']
        self._set_op_type()
        self.prim_op_type = "prim"
        self.dtype = self.get_dtype()
        self.init_data()
        self.attrs = {
            'axis': self.axis,
            'sections': self.sections,
            'num': self.num,
        }
        if self.dtype == np.uint16:
            self.inputs = {'X': convert_float_to_uint16(self.x)}
            out = np.split(self.x, self.indices_or_sections, self.axis)
            self.outputs = {
                'Out': [
                    ('out%d' % i, convert_float_to_uint16(out[i]))
                    for i in range(len(out))
                ]
            }
        else:
            self.inputs = {'X': self.x}
            out = np.split(self.x, self.indices_or_sections, self.axis)
            self.outputs = {
                'Out': [('out%d' % i, out[i]) for i in range(len(out))]
            }

    def init_data(self):
        if self.dtype == np.uint16:
            self.x = np.random.random((4, 5, 6)).astype(np.float32)
        else:
            self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = []
        self.num = 3
        self.indices_or_sections = 3

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'], check_prim=True)


# attr(axis) is passed at runtime as a Tensor input (AxisTensor)
class TestSplitOp_AxisTensor(OpTest):
    def setUp(self):
        self.python_api = paddle.split
        self.python_out_sig = ['out0', 'out1', 'out2']
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {
            'X': self.x,
            'AxisTensor': np.array([self.axis]).astype("int32"),
        }
        self.attrs = {'sections': self.sections, 'num': self.num}

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = []
        self.num = 3
        self.indices_or_sections = 3

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


# attr(sections) is a list containing Tensors (SectionsTensorList)
class TestSplitOp_SectionsTensor(OpTest):
    def setUp(self):
        self.python_api = paddle.split
        self.python_out_sig = ['out0', 'out1', 'out2']
        self._set_op_type()
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {'X': self.x}

        sections_tensor = []
        for index, ele in enumerate(self.sections):
            sections_tensor.append(
                ("x" + str(index), np.ones(1).astype('int32') * ele)
            )

        self.inputs['SectionsTensorList'] = sections_tensor

        self.attrs = {
            'axis': self.axis,
            'sections': self.sections_infer,
            'num': self.num,
        }

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 1
        self.sections = [2, 1, 2]
        self.sections_infer = [-1, -1, -1]
        self.num = 0
        self.indices_or_sections = [2, 3]

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'])


class TestSplitOp_unk_section(OpTest):
    def setUp(self):
        self.python_api = paddle.split
        self.public_python_api = paddle.split
        self.python_out_sig = ['out0', 'out1', 'out2']
        self._set_op_type()
        self.prim_op_type = "prim"
        self.dtype = self.get_dtype()
        self.init_data()
        self.inputs = {'X': self.x}
        self.attrs = {
            'axis': self.axis,
            'sections': self.sections,
            'num': self.num,
        }

        out = np.split(self.x, self.indices_or_sections, self.axis)
        self.outputs = {'Out': [('out%d' % i, out[i]) for i in range(len(out))]}

    def init_data(self):
        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
        self.axis = 2
        self.sections = [2, 1, -1]
        self.num = 0
        self.indices_or_sections = [2, 3]

    def get_dtype(self):
        return "float64"

    def _set_op_type(self):
        self.op_type = "split"

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], ['out0', 'out1', 'out2'], check_prim=True)


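# TestSplitByrefOp reruns the full TestSplitOp suite above against the
# split_byref op: overriding _set_op_type is the only change needed.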
class TestSplitByrefOp(OpTest):
    def _set_op_type(self):
        self.op_type = "split_byref"


# ----------------Split Fp16----------------


def create_test_fp16(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestSplitFP16Op(parent):
        def get_dtype(self):
            return np.float16

    cls_name = "{}_{}".format(parent.__name__, "FP16Op")
    TestSplitFP16Op.__name__ = cls_name
    globals()[cls_name] = TestSplitFP16Op


create_test_fp16(TestSplitOp)
create_test_fp16(TestSplitWithNumOp)

# ----------------Split Bf16----------------


def create_test_bf16(parent):
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or does not support bfloat16",
    )
    class TestSplitBF16Op(parent):
        def get_dtype(self):
            return np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'], 'out2')

    cls_name = "{}_{}".format(parent.__name__, "BF16Op")
    TestSplitBF16Op.__name__ = cls_name
    globals()[cls_name] = TestSplitBF16Op


create_test_bf16(TestSplitOp)
create_test_bf16(TestSplitWithNumOp)
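
# create_test_fp16 and create_test_bf16 are class factories: each call derives
# a low-precision variant of the given test class, renames it, and registers
# it in globals() so that unittest discovery picks it up alongside the
# originals.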


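# Static-graph API coverage: section sizes and the split axis may be given as
# 1-element Tensors, Python ints, or -1 (to be inferred), in any combination.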
class TestSplitAPI(unittest.TestCase):
    def test_api(self):
        input_1 = np.random.random([4, 5, 6]).astype("int32")
        positive_1_int32 = paddle.tensor.fill_constant([1], "int32", 1)
        positive_1_int64 = paddle.tensor.fill_constant([1], "int64", 1)
        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
        x_1 = paddle.static.data(shape=[4, 5, 6], dtype='int32', name='x_1')
        x_2 = paddle.static.data(shape=[4, 5, None], dtype='int32', name='x_2')

        out_0, out_1, out_2 = paddle.split(
            x=x_1,
            num_or_sections=[positive_2_int64, positive_1_int32, -1],
            axis=positive_1_int64,
        )

        out_3, out_4, out_5 = paddle.split(
            x=x_1, num_or_sections=[2, 1, 2], axis=positive_1_int32
        )
        paddle.split(x=x_2, num_or_sections=2, axis=2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_0, res_1, res_2, res_3, res_4, res_5] = exe.run(
            fluid.default_main_program(),
            feed={"x_1": input_1, "x_2": input_1},
            fetch_list=[out_0, out_1, out_2, out_3, out_4, out_5],
        )

        out = np.split(input_1, [2, 3], 1)
        assert np.array_equal(res_0, out[0])
        assert np.array_equal(res_1, out[1])
        assert np.array_equal(res_2, out[2])
        assert np.array_equal(res_3, out[0])
        assert np.array_equal(res_4, out[1])
        assert np.array_equal(res_5, out[2])


class TestSplitOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The type of axis in split_op should be int or Variable.
            def test_axis_type():
                x6 = paddle.static.data(
                    shape=[-1, 4], dtype='float16', name='x3'
                )
                paddle.split(x=x6, num_or_sections=2, axis=3.2)

            self.assertRaises(TypeError, test_axis_type)

            # The type of axis in split_op should be int or Variable.
            def test_axis_variable_type():
                x9 = paddle.static.data(
                    shape=[-1, 4], dtype='float16', name='x9'
                )
                x10 = paddle.static.data(
                    shape=[-1, 1], dtype='float16', name='x10'
                )
                paddle.split(x=x9, num_or_sections=2, axis=x10)

            self.assertRaises(TypeError, test_axis_variable_type)

            # The type of num_or_sections in split_op should be int, tuple or list.
            def test_num_or_sections_type():
                x6 = paddle.static.data(
                    shape=[-1, 4], dtype='float16', name='x4'
                )
                paddle.split(x=x6, num_or_sections=2.1, axis=3)

            self.assertRaises(TypeError, test_num_or_sections_type)

            def test_num_or_sections_type_tensor():
                x7 = paddle.static.data(
                    shape=[-1, 4], dtype='float16', name='x5'
                )
                paddle.split(input=x7, num_or_sections=2.1, dim=3)

            self.assertRaises(TypeError, test_num_or_sections_type_tensor)

            def test_axis_type_tensor():
                x8 = paddle.static.data(
                    shape=[-1, 4], dtype='float16', name='x6'
                )
                paddle.split(input=x8, num_or_sections=2, dim=3.2)

            self.assertRaises(TypeError, test_axis_type_tensor)

        with paddle.fluid.dygraph.guard():

            def test_0_num_tensor():
                x = paddle.uniform([1, 1, 1], dtype='float32')
                paddle.split(x, num_or_sections=0)

            self.assertRaises(ValueError, test_0_num_tensor)


class API_TestSplit(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data1 = paddle.static.data(
                'data1', shape=[-1, 4, 6, 6], dtype='float64'
            )
            data1.desc.set_need_check_feed(False)
            data2 = paddle.static.data('data2', shape=[-1, 1], dtype='int32')
            data2.desc.set_need_check_feed(False)
            x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=data2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([4, 6, 6]).astype('float64')
            input2 = np.array([2]).astype('int32')
            r0, r1, r2 = exe.run(
                feed={"data1": input1, "data2": input2}, fetch_list=[x0, x1, x2]
            )
            ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, r2, rtol=1e-05)


class API_TestSplit2(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data1 = paddle.static.data(
                'data1', shape=[-1, 4, 6, 6], dtype='float64'
            )
            data1.desc.set_need_check_feed(False)
            x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([4, 6, 6]).astype('float64')
            r0, r1, r2 = exe.run(
                feed={"data1": input1}, fetch_list=[x0, x1, x2]
            )
            ex_x0, ex_x1, ex_x2 = np.split(input1, 3, axis=2)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, r2, rtol=1e-05)


class API_TestSplit3(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = paddle.static.data('data', shape=[-1, 10], dtype='float64')
            x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([1, 10]).astype('float64')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, (3,), axis=1)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


class API_TestSplit4(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = paddle.static.data('data', shape=[-1, 10], dtype='float64')
            index = paddle.static.data('index', shape=[1], dtype='int32')
            x0, x1 = paddle.split(data, num_or_sections=(3, index), axis=1)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([1, 10]).astype('float64')
            input2 = np.array([7]).astype('int32')
            r0, r1 = exe.run(
                feed={"data": input1, "index": input2}, fetch_list=[x0, x1]
            )
            ex_x0, ex_x1 = np.split(input1, (3,), axis=1)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


class API_TestSplit5(unittest.TestCase):
    def test_out(self):
        for use_cuda in (
            [False, True] if core.is_compiled_with_cuda() else [False]
        ):
            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_1 = np.random.random([5, 4]).astype("int32")
                # input is a tensor whose shape is [5, 4]
                input = paddle.to_tensor(input_1)
                n = paddle.full([1], 5, dtype='int32')
                out = paddle.split(input, [n])
                exe = paddle.static.Executor(place=place)
                re = exe.run(fetch_list=[out])
                re = re[0]
                ex_out = np.split(input_1, [5])
                ex_out = ex_out[0]
                np.testing.assert_allclose(ex_out, re, rtol=1e-05)


class API_TestSplit6(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            data = paddle.static.data('data', shape=[-1, 10], dtype='float64')
            x0, x1 = paddle.split(data, num_or_sections=[1, 1], axis=0)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([2, 10]).astype('float64')
            r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
            ex_x0, ex_x1 = np.split(input1, (1,), axis=0)
            np.testing.assert_allclose(ex_x0, r0, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, r1, rtol=1e-05)


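# The dygraph tests below verify values and gradients: with loss = x0.sum(),
# the gradient w.r.t. the input is 1 over the slice that went into x0 (the
# first two rows along axis 1) and 0 elsewhere, which is exactly what
# manual_grad encodes.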
class API_TestDygraphFluidSplit(unittest.TestCase):
    def test_out1(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out2(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, [2, 2, 2], axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)


class API_TestDygraphSplit(unittest.TestCase):
    def test_out1(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)

            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            input.stop_gradient = False
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            eager_x0_out = x0.numpy()
            eager_x1_out = x1.numpy()
            eager_x2_out = x2.numpy()
            loss = x0.sum()
            loss.backward()
            manual_grad = np.zeros_like(input_1)
            manual_grad[:, :2, :] = 1
            np.testing.assert_allclose(input.gradient(), manual_grad, rtol=1e-05)
            np.testing.assert_allclose(ex_x0, eager_x0_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x1, eager_x1_out, rtol=1e-05)
            np.testing.assert_allclose(ex_x2, eager_x2_out, rtol=1e-05)

        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out2(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("bool")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=3, axis=1)
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_out3(self):
        with fluid.dygraph.guard():
            np.random.seed(2021)
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            out_dy = paddle.split(input, [6], axis=1)
            out_dy = out_dy[0]
            out_dy_np = out_dy.numpy()
            ex_out = np.split(input_1, [6], axis=1)
            ex_out = ex_out[0]
            input = paddle.to_tensor(input_1)
            out_eager = paddle.split(input, [6], axis=1)
            out_eager = out_eager[0]
            out_eager_np = out_eager.numpy()
            np.testing.assert_allclose(ex_out, out_eager_np, rtol=1e-05)
        np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05)

    def test_out_tensor_input(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            num1 = paddle.full(shape=[1], fill_value=2, dtype='int32')
            x0, x1, x2 = paddle.split(
                input, num_or_sections=[num1, 2, 2], axis=1
            )
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_axis_tensor_input(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            num1 = paddle.full(shape=[1], fill_value=1, dtype='int32')
            x0, x1, x2 = paddle.split(
                input, num_or_sections=[2, 2, 2], axis=num1
            )
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, 3, axis=1)
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)

    def test_negative_one_section(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([4, 6, 6]).astype("int32")
            # input is a tensor whose shape is [4, 6, 6]
            input = paddle.to_tensor(input_1)
            num1 = paddle.full(shape=[1], fill_value=1, dtype='int32')
            x0 = paddle.split(input, num_or_sections=[-1], axis=num1)
            x0_out = x0[0].numpy()
        np.testing.assert_array_equal(x0_out, input.numpy())

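# A zero-length section is allowed: paddle.split with num_or_sections=[5, 0, 3]
# matches np.split(input_1, [5, 5]), whose middle chunk is empty along axis 0.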
class API_TestEmptySplit(unittest.TestCase):
    def test_axis_input_empty_section(self):
        with fluid.dygraph.guard():
            input_1 = np.random.random([8, 6, 6]).astype("float32")
            # input is a tensor whose shape is [8, 6, 6]
            input = paddle.to_tensor(input_1)
            x0, x1, x2 = paddle.split(input, num_or_sections=[5, 0, 3])
            x0_out = x0.numpy()
            x1_out = x1.numpy()
            x2_out = x2.numpy()
            ex_x0, ex_x1, ex_x2 = np.split(input_1, [5, 5])
        np.testing.assert_allclose(ex_x0, x0_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x1, x1_out, rtol=1e-05)
        np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()