#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.tensor.manipulation import tensor_array_to_tensor

paddle.enable_static()
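
# The slice op mirrors basic NumPy/Python slicing along the axes listed in
# `axes`: as a rough sketch of the semantics these tests assume,
# paddle.slice(x, axes=[0, 2], starts=[1, 0], ends=[3, 4]) should behave like
# x[1:3, :, 0:4] for a 3-D input. The cases below cover starts/ends given as
# plain lists, as lists containing tensors, and as whole tensors, with and
# without the decrease_axis attribute; an infer_flags entry of -1 marks a
# value that is only known at run time.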

# Situation 1: starts(list, no tensor), ends(list, no tensor)
# 1.1 without attr(decrease)
class TestSliceOp(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['Input'], 'Out', max_relative_error=0.006, check_prim=True
        )


class TestCase1(TestSliceOp):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, 2:-1, :]


class TestCase2(TestSliceOp):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, :, 2:-1]


class TestSliceZerosShapeTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'use_mkldnn': True,
        }

    def config(self):
        self.input = np.random.random([0, 0, 0]).astype("float32")
        self.starts = [1]
        self.ends = [2]
        self.axes = [0]
        self.infer_flags = []
        self.out = self.input[1:2]

    def test_check_output(self):
        self.check_output_with_place(paddle.CPUPlace())


# 1.2 with attr(decrease)
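# decrease_axis removes the sliced axes listed in it from the output shape, so
# the expected result matches integer indexing rather than a length-1 slice,
# e.g. decrease_axis=[0] with starts=[1], ends=[2] corresponds to
# input[1, ...] instead of input[1:2, ...] (a sketch of the assumed behaviour).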
class TestSliceOp_decs_dim(OpTest):
    def setUp(self):
        self.enable_cinn = True
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(
            ['Input'], 'Out', max_relative_error=0.006, check_prim=True
        )


class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
    def config(self):
        self.enable_cinn = True
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0, 2:4, :]


class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
    def config(self):
        self.enable_cinn = True
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1, 0, 2]
        self.ends = [1000000, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-1, 0, 2:4, :]


class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
    def config(self):
        self.enable_cinn = True
        self.input = np.random.random([3, 4, 5, 7]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
    def config(self):
        self.enable_cinn = True
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[:, :, :, -1]


# test_6: decrease all axes (cf. test_2 and test_3)
class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
    def config(self):
        self.enable_cinn = True
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


# Situation 2: starts(list, have tensor), ends(list, no tensor)
# without attr(decrease)
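# In this situation each start value is fed through the StartsTensorList input
# as a separate one-element tensor, while the 'starts' attribute only carries
# placeholder values (entries that come from a tensor are marked with -1 in
# both 'starts' and 'infer_flags').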
class TestSliceOp_starts_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()

        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(
                ("x" + str(index), np.ones((1)).astype('int64') * ele)
            )

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, 1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.starts_infer = [-1, 0, -1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 2: starts(list, have tensor), ends(list, no tensor)
#  with attr(decrease)
class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()

        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(
                ("x" + str(index), np.ones((1)).astype('int32') * ele)
            )

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}

        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, -1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

        self.starts_infer = [1, -1, 2]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestSliceOp_decs_dim_5_starts_ListTensor(
    TestSliceOp_decs_dim_starts_ListTensor
):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [-1]
        self.out = self.input[:, :, :, -1]

        self.starts_infer = [-1]


# Situation 3: starts(tensor), ends(list, no tensor)
# with attr(decrease)
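# Here the whole starts list is passed as a single 1-D StartsTensor input and
# the 'starts' attribute is omitted; all infer_flags entries are -1 because
# the values are only known at run time.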
class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32"),
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 4: starts(tensor), ends(tensor)
#  without attr(decrease)
class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int64"),
            "EndsTensor": np.array(self.ends, dtype="int32"),
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts': self.starts,
            # 'ends': self.ends_infer,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 5: starts(tensor), ends(tensor)
#  with attr(decrease)
class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32"),
            "EndsTensor": np.array(self.ends, dtype="int32"),
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts': self.starts,
            # 'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 6: starts(tensor), ends(list, have tensor)
# without attr(decrease)
class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.python_api = paddle.slice
        self.config()

        ends_tensor = []
        for index, ele in enumerate(self.ends):
            ends_tensor.append(
                ("y" + str(index), np.ones((1)).astype('int32') * ele)
            )

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32"),
            'EndsTensorList': ends_tensor,
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts': self.starts,
            'ends': self.ends_infer,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.ends_infer = [-1, 3, 4]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Test CUDA float16
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.out = self.input[-3:3, 0:100, :, 2:-1]
        self.infer_flags = [1, 1, 1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, check_prim=True)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place,
                ['Input'],
                'Out',
                check_prim=True,
            )


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestFP16_2(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 10]).astype(self.dtype)
        self.starts = [0]
        self.ends = [1]
        self.axes = [1]
        self.out = self.input[:, 0:1, :]
        self.infer_flags = [1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, check_prim=True)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place,
                ['Input'],
                'Out',
                numeric_grad_delta=0.5,
                check_prim=True,
            )


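# The test harness here represents bfloat16 data as uint16 storage, so the
# float32 reference input/output are converted with convert_float_to_uint16
# before being handed to the op (a note on the assumed OpTest convention).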
class TestBF16(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.prim_op_type = "prim"
        self.python_api = paddle.slice
        self.public_python_api = paddle.slice
        self.config()
        self.inputs = {'Input': convert_float_to_uint16(self.input)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
        }

    def config(self):
        self.dtype = np.uint16
        self.input = np.random.random([3, 4, 5, 6]).astype(np.float32)
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.out = self.input[-3:3, 0:100, :, 2:-1]
        self.infer_flags = [1, 1, 1]

    def test_check_output(self):
        self.check_output()

    # pad does not support bfloat16, so we can't test prim.
    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out')


# Test the Python API
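# The assertions below compare against NumPy slicing of the same ranges, so
# out-of-range bounds (e.g. ends=100, or an int64 end beyond the int32 range)
# are expected to be clipped the same way Python slicing clips them.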
class TestSliceAPI(unittest.TestCase):
    def test_1(self):
        input = np.random.random([3, 4, 5, 6]).astype("float64")
        minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
        minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
        starts = paddle.static.data(
            name='starts', shape=[1, 3], dtype="float32"
        )
        starts.desc.set_need_check_feed(False)
        ends = paddle.static.data(name='ends', shape=[3], dtype="float32")
        ends.desc.set_need_check_feed(False)
        x = paddle.static.data(
            name="x",
            shape=[3, 4, 5, 6],
            dtype="float64",
        )

        # value_int64 is greater than 2147483647, the max value of int32
        value_int64 = paddle.tensor.fill_constant([1], "int64", 2147483648)

        out_1 = paddle.slice(
            x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]
        )
        out_2 = paddle.slice(
            x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]
        )
        out_3 = paddle.slice(
            x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]
        )
        out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)

        out_5 = x[-3:3, 0:100, 2:-1]
        out_6 = x[minus_3:3, 0:100, :, 2:-1]
        out_7 = x[minus_1, 0:100, :, 2:minus_1]

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
            fluid.default_main_program(),
            feed={
                "x": input,
                'starts': np.array([-3, 0, 2]).astype("int32"),
                'ends': np.array([3, 100, -1]).astype("int32"),
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
        )

        assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])


class TestSliceApiWithTensor(unittest.TestCase):
    def test_starts_ends_is_tensor(self):
        with paddle.fluid.dygraph.guard():
            a = paddle.rand(shape=[4, 5, 6], dtype='float32')
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            a_1 = paddle.slice(
                a,
                axes=axes,
                starts=paddle.to_tensor(starts, dtype='int32'),
                ends=paddle.to_tensor(ends, dtype='int32'),
            )
            a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)

            np.testing.assert_array_equal(a_1.numpy(), a_2.numpy())

    def test_bool_tensor(self):
        with paddle.fluid.dygraph.guard():
            array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool')
            tt = paddle.to_tensor(array)
            tt.stop_gradient = False

            starts = [0, 1, 2]
            ends = [3, 5, 4]
            axes = [0, 1, 2]

            y_paddle = paddle.slice(tt, axes, starts, ends)
            y_np = tt[0:3, 1:5, 2:4]

            self.assertTrue(paddle.bool == y_paddle.dtype)
            np.testing.assert_array_equal(y_paddle.numpy(), y_np)


class TestSliceApiEager(unittest.TestCase):
    def test_slice_api(self):
        with paddle.fluid.dygraph.guard():
            a = paddle.rand(shape=[4, 5, 6], dtype='float32')
            a.stop_gradient = False
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            a_1 = paddle.slice(a, axes=axes, starts=starts, ends=ends)

            a_2 = paddle.slice(
                a,
                axes=axes,
                starts=paddle.to_tensor(starts),
                ends=paddle.to_tensor(ends),
            )
            np.testing.assert_array_equal(a_1.numpy(), a_2.numpy())
            a_1.backward()
            grad_truth = paddle.zeros_like(a)
            grad_truth[-3:3, 0:2, 2:4] = 1
            np.testing.assert_array_equal(grad_truth, a.gradient())

            np.testing.assert_allclose(
                a_1.numpy(), a[-3:3, 0:2, 2:4], rtol=1e-05
            )


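# Slicing a LoDTensorArray with a single index returns one LoDTensor, while a
# range such as arr[start:end] yields a sub-array that is stacked back into a
# tensor via tensor_array_to_tensor; the cases below also check the gradients
# that flow back to the array elements.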
class TestSliceApiWithLoDTensorArray(unittest.TestCase):
    def setUp(self):
        self.shape = (3, 4)
        self.data = np.random.random(size=self.shape).astype('float32')
        self.idx = 0
        self.start = 0
        self.end = 2
        self.axis = 1

        self.place = (
            fluid.CUDAPlace(0)
            if fluid.is_compiled_with_cuda()
            else fluid.CPUPlace()
        )
        self.exe = fluid.Executor(self.place)

    def set_program_and_run(self, main_program, case_num):
        with fluid.program_guard(main_program):
            x = [
                paddle.static.data(
                    name='x0', shape=self.shape, dtype="float32"
                ),
                paddle.static.data(
                    name='x1', shape=self.shape, dtype="float32"
                ),
                paddle.static.data(
                    name='x2', shape=self.shape, dtype="float32"
                ),
            ]

            for each_x in x:
                each_x.stop_gradient = False

            arr = paddle.tensor.create_array(dtype="float32")
            for i in range(3):
                idx = paddle.tensor.array_length(arr)
                arr = paddle.tensor.array_write(x=x[i], i=idx, array=arr)

            if case_num == 1:
                self.sliced_arr = output = arr[0]

            elif case_num == 2:
                end = (
                    paddle.tensor.array_length(arr) - 1
                )  # dtype of end is int64
                self.sliced_arr = slice_arr = arr[self.start : end]
                output, _ = tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True
                )
            elif case_num == 3:
                value_int64 = paddle.tensor.fill_constant(
                    [1], "int64", 2147483648
                )
                self.sliced_arr = slice_arr = arr[self.start : value_int64]
                output, _ = tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True
                )

            loss = paddle.sum(output)
            fluid.backward.append_backward(loss)
            g_vars = list(
                map(
                    main_program.global_block().var,
                    [each_x.name + "@GRAD" for each_x in x],
                )
            )
            self.out, self.g_x0, self.g_x1, self.g_x2 = self.exe.run(
                main_program,
                feed={'x0': self.data, 'x1': self.data, 'x2': self.data},
                fetch_list=[output] + g_vars,
            )

    def test_case_1(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 1)

        self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        np.testing.assert_array_equal(self.out, self.data)
        np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
        np.testing.assert_array_equal(self.g_x1, np.zeros_like(self.data))
        np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))

    def test_case_2(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 2)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        )
        self.assertEqual(self.sliced_arr.shape, self.shape)
        np.testing.assert_array_equal(
            self.out, np.stack([self.data, self.data], axis=self.axis)
        )
        np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
        np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
        np.testing.assert_array_equal(self.g_x2, np.zeros_like(self.data))

    def test_case_3(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 3)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        )
        self.assertEqual(self.sliced_arr.shape, self.shape)
        np.testing.assert_array_equal(
            self.out,
            np.stack([self.data, self.data, self.data], axis=self.axis),
        )
        np.testing.assert_array_equal(self.g_x0, np.ones_like(self.data))
        np.testing.assert_array_equal(self.g_x1, np.ones_like(self.data))
        np.testing.assert_array_equal(self.g_x2, np.ones_like(self.data))


class TestImperativeVarBaseGetItem(unittest.TestCase):
    def test_getitem_with_long(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = fluid.dygraph.to_variable(data)
            sliced = var[:, 10:, : var.shape[1]]  # var.shape[1] is 80L here
            self.assertEqual(sliced.shape, [2, 70, 80])

            sliced = var[:, var.shape[0] :, var.shape[0] : var.shape[1]]
            self.assertEqual(sliced.shape, [2, 78, 78])

    def test_getitem_with_float(self):
        def test_float_in_slice_item():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[:, 1.1:, : var.shape[1]]

        self.assertRaises(Exception, test_float_in_slice_item)

        def test_float_in_index():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[1.1]

        self.assertRaises(Exception, test_float_in_index)


class TestInferShape(unittest.TestCase):
    def test(self):
        x = paddle.ones(shape=[3, 4, 5])
        x.desc.set_shape([3, -1, 5])
        self.assertEqual(x.shape, (3, -1, 5))

        out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3])
        self.assertEqual(out0.shape, (3, -1, 5))

    def test_axis_less_than_zero(self):
        # Using paddle.disable_static will make other unittests fail.
        with fluid.dygraph.guard():
            x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
            x = paddle.to_tensor(x_arr)

            pp_slice = paddle.slice(
                x,
                [
                    100,
                ],
                [0],
                [1],
            )
            np_slice = x_arr[:, :, 0:1]
            np.testing.assert_array_equal(pp_slice, np_slice)

            pp_slice = paddle.slice(x, (-100,), [0], [1])
            np_slice = x_arr[0:1]
            np.testing.assert_array_equal(pp_slice, np_slice)

            x_arr = np.array([], dtype=np.float32)
            x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))

            starts = paddle.to_tensor(
                np.reshape(np.array([], dtype=np.int32), (0,))
            )
            ends = paddle.to_tensor(
                np.reshape(np.array([], dtype=np.int32), (0,))
            )

            with self.assertRaises(ValueError):
                paddle.slice(x, [-1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, 0, starts, ends)


class TestSliceOpError(unittest.TestCase):
    def test_dismatch_shape(self):
        with fluid.dygraph.guard():
            with self.assertRaises(ValueError):
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32')
                paddle.slice(x, axes=[0], starts=[], ends=[])

            with self.assertRaises(ValueError):
                array = np.array([], dtype=np.float32)
                x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32')
                paddle.slice(x, axes=[0], starts=[0], ends=[])

            # if the shapes match, the call should succeed
            array = np.array([], dtype=np.float32)
            x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32')
            out = paddle.slice(x, axes=[0], starts=[0], ends=[0])
            self.assertEqual(out.numel(), 0)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestImperativeCUDAPinnedInput(unittest.TestCase):
    def test_input_cuda_pinned_var(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = core.eager.Tensor(
                value=data,
                name='',
                persistable=False,
                place=fluid.CUDAPinnedPlace(),
                zero_copy=False,
            )
            sliced = var[:, 10:, : var.shape[1]]
            self.assertEqual(sliced.shape, [2, 70, 80])


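# The double/triple gradient checks below compare analytic higher-order
# gradients against finite differences with step size eps; the *_for_dygraph
# variants take a Python callable (slice_wrapper) that re-applies the op to a
# list of input tensors.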
class TestSliceDoubleGradCheck(unittest.TestCase):
    def slice_wrapper(self, x):
        return paddle.slice(
            x[0], axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4]
        )

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and
        # must not include -1
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [4, 5, 6], dtype)
        data.persistable = True
        out = paddle.slice(
            data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4]
        )
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.double_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.double_grad_check_for_dygraph(
            self.slice_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSliceTripleGradCheck(unittest.TestCase):
    def slice_wrapper(self, x):
        return paddle.slice(
            x[0], axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4]
        )

    @prog_scope()
    def func(self, place):
        # the shape of the input variable should be clearly specified and
        # must not include -1
        eps = 0.005
        dtype = np.float32

        data = paddle.static.data('data', [4, 5, 6], dtype)
        data.persistable = True
        out = paddle.slice(
            data, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 2, 4]
        )
        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)

        gradient_checker.triple_grad_check(
            [data], out, x_init=[data_arr], place=place, eps=eps
        )
        gradient_checker.triple_grad_check_for_dygraph(
            self.slice_wrapper, [data], out, x_init=[data_arr], place=place
        )

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()