test_slice_op.py 28.0 KB
Newer Older
W
whs 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15 16
from __future__ import print_function

W
whs 已提交
17 18
import unittest
import numpy as np
19
import paddle.fluid.core as core
20
from op_test import OpTest, convert_float_to_uint16
21
import paddle.fluid as fluid
22
import paddle.fluid.layers as layers
23
import paddle
H
hong 已提交
24
from paddle.fluid.framework import _test_eager_guard
W
whs 已提交
25

26 27
paddle.enable_static()

# Situation 1: starts(list, no tensor), ends(list, no tensor)
# 1.1 without attr(decrease)
class TestSliceOp(OpTest):
    """Slice op with plain-list starts/ends attrs and no decrease_axis."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        # Reference output is computed with plain numpy slicing.
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)

class TestCase1(TestSliceOp):
    """Negative starts and out-of-range ends on axes [0, 1, 2]."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, 2:-1, :]


class TestCase2(TestSliceOp):
    """Negative starts and out-of-range ends on axes [0, 1, 3]."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, :, 2:-1]


# 1.2 with attr(decrease)
class TestSliceOp_decs_dim(OpTest):
    """Slice op with list starts/ends and the decrease_axis attribute."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        # Integer indexing on axis 0 mirrors decrease_axis=[0].
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
    """Decrease two axes at once (axes 0 and 1)."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0, 2:4, :]


class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
    """Decrease axes with a negative start and an oversized end."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1, 0, 2]
        self.ends = [1000000, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-1, 0, 2:4, :]


class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
    """Decrease all four axes (output keeps only the last slice dim)."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 7]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
    """Decrease the last axis using a negative start and huge end."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[:, :, :, -1]


class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
    """Same as _decs_dim_4 but with input shape [3, 4, 5, 6]."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


# Situation 2: starts(list, have tensor), ends(list, no tensor)
# without attr(decrease)
class TestSliceOp_starts_ListTensor(OpTest):
    """starts fed as a list of 1-D int64 tensors; ends as a plain list."""

    def setUp(self):
        self.op_type = "slice"
        self.config()

        # Each start value becomes a named 1-element tensor input.
        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(("x" + str(index), np.ones(
                (1)).astype('int64') * ele))

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # -1 entries in starts_infer mark values supplied at runtime.
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, 1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.starts_infer = [-1, 0, -1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 2: starts(list, have tensor), ends(list, no tensor)
#  with attr(decrease)
class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
    """Tensor-list starts combined with the decrease_axis attribute."""

    def setUp(self):
        self.op_type = "slice"
        self.config()

        # Each start value becomes a named 1-element int32 tensor input.
        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(("x" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}

        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # -1 entries in starts_infer mark values supplied at runtime.
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, -1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

        self.starts_infer = [1, -1, 2]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestSliceOp_decs_dim_5_starts_ListTensor(
        TestSliceOp_decs_dim_starts_ListTensor):
    """Single-axis variant: tensor-list start on the last axis."""

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [-1]
        self.out = self.input[:, :, :, -1]

        self.starts_infer = [-1]


# Situation 3: starts(tensor), ends(list, no tensor)
# with attr(decrease)
class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
    """starts given as a single int32 tensor; ends stays a list attr."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts' attr intentionally omitted: StartsTensor supplies it.
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 4: starts(tensor), ends(tensor)
#  without attr(decrease)
class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
    """Both starts and ends given as whole tensors (mixed int dtypes)."""

    def setUp(self):
        self.op_type = "slice"
        self.config()

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int64"),
            "EndsTensor": np.array(self.ends, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts'/'ends' attrs intentionally omitted: tensors supply them.
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 5: starts(tensor), ends(tensor)
#  with attr(decrease)
class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
    """Tensor starts and ends combined with decrease_axis."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32"),
            "EndsTensor": np.array(self.ends, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts'/'ends' attrs intentionally omitted: tensors supply them.
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 6: starts(tensor), ends(list, have tensor)
# without attr(decrease)
class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
    """Tensor starts; ends as a list of 1-element int32 tensors."""

    def setUp(self):
        self.op_type = "slice"
        self.config()

        # Each end value becomes a named 1-element int32 tensor input.
        ends_tensor = []
        for index, ele in enumerate(self.ends):
            ends_tensor.append(("y" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(self.starts, dtype="int32"),
            'EndsTensorList': ends_tensor
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            # 'starts' attr intentionally omitted: StartsTensor supplies it.
            'ends': self.ends_infer,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.ends_infer = [-1, 3, 4]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Test CUDA float16
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16(OpTest):
    """float16 slice on CUDA; skipped when CUDA is unavailable."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.out = self.input[-3:3, 0:100, :, 2:-1]
        self.infer_flags = [1, 1, 1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-5)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(place, ['Input'],
                                       'Out',
                                       max_relative_error=0.006)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16_2(OpTest):
    """float16 slice on a 3-D input; gradient checked with a large delta."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 10]).astype(self.dtype)
        self.starts = [0]
        self.ends = [1]
        self.axes = [1]
        self.out = self.input[:, 0:1, :]
        self.infer_flags = [1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-5)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # fp16 numeric gradients are noisy; use a coarse delta.
            self.check_grad_with_place(place, ['Input'],
                                       'Out',
                                       max_relative_error=0.006,
                                       numeric_grad_delta=0.5)


class TestBF16(OpTest):
    """bfloat16 slice: float32 data is converted to uint16 storage."""

    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': convert_float_to_uint16(self.input)}
        self.outputs = {'Out': convert_float_to_uint16(self.out)}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.dtype = np.uint16
        self.input = np.random.random([3, 4, 5, 6]).astype(np.float32)
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.out = self.input[-3:3, 0:100, :, 2:-1]
        self.infer_flags = [1, 1, 1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out')


# Test python API
class TestSliceAPI(unittest.TestCase):
    """Static-graph paddle.slice and __getitem__ against numpy slicing."""

    def test_1(self):
        input = np.random.random([3, 4, 5, 6]).astype("float64")
        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
        minus_3 = fluid.layers.fill_constant([1], "int64", -3)
        starts = fluid.layers.data(name='starts',
                                   shape=[1, 3],
                                   append_batch_size=False)
        ends = fluid.layers.data(name='ends',
                                 shape=[3],
                                 append_batch_size=False)

        x = fluid.layers.data(name="x",
                              shape=[3, 4, 5, 6],
                              append_batch_size=False,
                              dtype="float64")

        # value_int64 is greater than 2147483647 which is the max of int32
        value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)

        out_1 = paddle.slice(x,
                             axes=[0, 1, 2],
                             starts=[-3, 0, 2],
                             ends=[value_int64, 100, -1])
        out_2 = paddle.slice(x,
                             axes=[0, 1, 3],
                             starts=[minus_3, 0, 2],
                             ends=[3, 100, -1])
        out_3 = paddle.slice(x,
                             axes=[0, 1, 3],
                             starts=[minus_3, 0, 2],
                             ends=[3, 100, minus_1])
        out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)

        out_5 = x[-3:3, 0:100, 2:-1]
        out_6 = x[minus_3:3, 0:100, :, 2:-1]
        out_7 = x[minus_1, 0:100, :, 2:minus_1]

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
            fluid.default_main_program(),
            feed={
                "x": input,
                'starts': np.array([-3, 0, 2]).astype("int32"),
                'ends': np.array([3, 100, -1]).astype("int32")
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])

        assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])


class TestSliceApiWithTensor(unittest.TestCase):
    """Dygraph paddle.slice with tensor starts/ends and bool inputs."""

    def test_starts_ends_is_tensor(self):
        with paddle.fluid.dygraph.guard():
            a = paddle.rand(shape=[4, 5, 6], dtype='float32')
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            a_1 = paddle.slice(a,
                               axes=axes,
                               starts=paddle.to_tensor(starts, dtype='int32'),
                               ends=paddle.to_tensor(ends, dtype='int32'))
            a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)

            self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))

    def test_bool_tensor(self):
        with paddle.fluid.dygraph.guard():
            array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool')
            tt = paddle.to_tensor(array)
            tt.stop_gradient = False

            starts = [0, 1, 2]
            ends = [3, 5, 4]
            axes = [0, 1, 2]

            y_paddle = paddle.slice(tt, axes, starts, ends)
            y_np = tt[0:3, 1:5, 2:4]

            self.assertTrue(paddle.bool == y_paddle.dtype)
            self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))

class TestSliceApiEager(unittest.TestCase):
    """Eager-mode slice: forward value and gradient of the input."""

    def test_slice_api(self):
        with paddle.fluid.dygraph.guard():
            with _test_eager_guard():
                a = paddle.rand(shape=[4, 5, 6], dtype='float32')
                a.stop_gradient = False
                axes = [0, 1, 2]
                starts = [-3, 0, 2]
                ends = [3, 2, 4]
                a_1 = paddle.slice(a, axes=axes, starts=starts, ends=ends)

                a_2 = paddle.slice(a,
                                   axes=axes,
                                   starts=paddle.to_tensor(starts),
                                   ends=paddle.to_tensor(ends))

                a_1.backward()
                # Only the sliced region of `a` should receive gradient 1.
                grad_truth = paddle.zeros_like(a)
                grad_truth[-3:3, 0:2, 2:4] = 1
                self.assertTrue(np.array_equal(grad_truth, a.gradient()))

                self.assertTrue(np.allclose(a_1.numpy(), a[-3:3, 0:2, 2:4]))


class TestSliceApiWithLoDTensorArray(unittest.TestCase):
    """Slicing a LoDTensorArray: single element, tensor end, huge end."""

    def setUp(self):
        self.shape = (3, 4)
        self.data = np.random.random(size=self.shape).astype('float32')
        self.idx = 0
        self.start = 0
        self.end = 2
        self.axis = 1

        self.place = fluid.CUDAPlace(
            0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)

    def set_program_and_run(self, main_program, case_num):
        """Build a 3-element tensor array, slice it per case_num, and run."""
        with fluid.program_guard(main_program):
            x = [
                fluid.data(name='x0', shape=self.shape, dtype="float32"),
                fluid.data(name='x1', shape=self.shape, dtype="float32"),
                fluid.data(name='x2', shape=self.shape, dtype="float32")
            ]

            for each_x in x:
                each_x.stop_gradient = False

            arr = layers.create_array(dtype="float32")
            for i in range(3):
                idx = layers.array_length(arr)
                arr = layers.array_write(x=x[i], i=idx, array=arr)

            if case_num == 1:
                self.sliced_arr = output = arr[0]

            elif case_num == 2:
                end = fluid.layers.array_length(
                    arr) - 1  # dtype of end is int64
                self.sliced_arr = slice_arr = arr[self.start:end]
                output, _ = fluid.layers.tensor_array_to_tensor(slice_arr,
                                                                axis=self.axis,
                                                                use_stack=True)
            elif case_num == 3:
                value_int64 = fluid.layers.fill_constant([1], "int64",
                                                         2147483648)
                self.sliced_arr = slice_arr = arr[self.start:value_int64]
                output, _ = fluid.layers.tensor_array_to_tensor(slice_arr,
                                                                axis=self.axis,
                                                                use_stack=True)

            loss = fluid.layers.reduce_sum(output)
            fluid.backward.append_backward(loss)
            g_vars = list(
                map(main_program.global_block().var,
                    [each_x.name + "@GRAD" for each_x in x]))
            self.out, self.g_x0, self.g_x1, self.g_x2 = \
                self.exe.run(main_program,
                             feed = {'x0': self.data,
                                     'x1': self.data,
                                     'x2': self.data},
                             fetch_list=[output] + g_vars)

    def test_case_1(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 1)

        self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(np.array_equal(self.out, self.data))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))

    def test_case_2(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 2)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(self.out,
                           np.stack([self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))

    def test_case_3(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 3)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(
                self.out,
                np.stack([self.data, self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data)))

class TestImperativeVarBaseGetItem(unittest.TestCase):
    """Dygraph __getitem__ with int-like indices; floats must raise."""

    def test_getitem_with_long(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = fluid.dygraph.to_variable(data)
            sliced = var[:, 10:, :var.shape[1]]  # var.shape[1] is 80L here
            self.assertEqual(sliced.shape, [2, 70, 80])

            sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
            self.assertEqual(sliced.shape, [2, 78, 78])

    def test_getitem_with_float(self):

        def test_float_in_slice_item():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[:, 1.1:, :var.shape[1]]

        self.assertRaises(Exception, test_float_in_slice_item)

        def test_float_in_index():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[1.1]

        self.assertRaises(Exception, test_float_in_index)


class TestInferShape(unittest.TestCase):
    """Shape inference with -1 dims and out-of-range / invalid axes."""

    def test(self):
        x = paddle.ones(shape=[3, 4, 5])
        x.desc.set_shape([3, -1, 5])
        self.assertEqual(x.shape, (3, -1, 5))

        out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3])
        self.assertEqual(out0.shape, (3, 3, 5))

    def test_axis_less_than_zero(self):

        # Using paddle.disable_static will make other unittests fail.
        with fluid.dygraph.guard():
            x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
            x = paddle.to_tensor(x_arr)

            pp_slice = paddle.slice(x, [
                100,
            ], [0], [1])
            np_slice = x_arr[:, :, 0:1]
            self.assertTrue(np.array_equal(pp_slice, np_slice))

            pp_slice = paddle.slice(x, (-100, ), [0], [1])
            np_slice = x_arr[0:1]
            self.assertTrue(np.array_equal(pp_slice, np_slice))

            x_arr = np.array([], dtype=np.float32)
            x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))

            starts = paddle.to_tensor(
                np.reshape(np.array([], dtype=np.int32), (0, )))
            ends = paddle.to_tensor(
                np.reshape(np.array([], dtype=np.int32), (0, )))

            with self.assertRaises(ValueError):
                paddle.slice(x, [-1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [1000000], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, [], starts, ends)

            with self.assertRaises(ValueError):
                paddle.slice(x, 0, starts, ends)

@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestImperativeCUDAPinnedInput(unittest.TestCase):
    """Slicing a VarBase placed in CUDA pinned memory."""

    def test_input_cuda_pinned_var(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = core.VarBase(value=data,
                               name='',
                               persistable=False,
                               place=fluid.CUDAPinnedPlace(),
                               zero_copy=False)
            sliced = var[:, 10:, :var.shape[1]]
            self.assertEqual(sliced.shape, [2, 70, 80])


if __name__ == '__main__':
    # Re-assert static mode in case an earlier test left dygraph enabled.
    paddle.enable_static()
    unittest.main()