#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle


# Situation 1: starts (plain list, no tensors), ends (plain list, no tensors)
# 1.1 without the decrease_axis attr
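#
# All Situation 1 cases compare against plain numpy slicing on the given
# axes. A minimal sketch of that reference (the helper name _numpy_ref_slice
# is ours, for illustration only; the tests inline the equivalent
# expressions):
def _numpy_ref_slice(x, axes, starts, ends):
    # Slice only the requested axes; every other axis is taken whole.
    idx = [slice(None)] * x.ndim
    for axis, start, end in zip(axes, starts, ends):
        idx[axis] = slice(start, end)
    return x[tuple(idx)]

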
class TestSliceOp(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestCase1(TestSliceOp):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 2]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, 2:-1, :]


class TestCase2(TestSliceOp):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-3:3, 0:100, :, 2:-1]


# 1.2 with the decrease_axis attr
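#
# decrease_axis removes each listed axis from the output, the way integer
# indexing does in numpy: these tests expect input[1, 0:3, 2:4, :] rather
# than input[1:2, 0:3, 2:4, :]. A numpy sketch of that squeeze (helper name
# is ours; note the real op never drops every dimension: with decrease_axis
# covering all axes it still returns a 1-D tensor, as TestSliceOp_decs_dim_4
# expects):
def _numpy_ref_decrease(sliced, decrease_axis):
    # Drop the size-1 axes named in decrease_axis.
    return np.squeeze(sliced, axis=tuple(decrease_axis))

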
class TestSliceOp_decs_dim(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[1, 0, 2:4, :]


class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1, 0, 2]
        self.ends = [1000000, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[-1, 0, 2:4, :]


class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
    def config(self):
        self.input = np.random.random([3, 4, 5, 7]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[:, :, :, -1]


class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [0, 1, 2, 3]
        self.ends = [1, 2, 3, 4]
        self.axes = [0, 1, 2, 3]
        self.decrease_axis = [0, 1, 2, 3]
        self.infer_flags = [1, 1, 1]
        self.out = self.input[0, 1, 2, 3:4]


# Situation 2: starts (list containing tensors), ends (plain list)
# without the decrease_axis attr
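#
# Even when a start is fed through StartsTensorList, the compile-time
# 'starts' attr is still set (starts_infer below): a position whose value is
# usable for shape inference keeps its real value, while the rest hold a -1
# placeholder, mirrored by -1 in 'infer_flags'. A sketch of that masking
# (helper name is ours, for illustration), e.g.
# _mask_unknown_dims([1, 0, 2], {0, 2}) == [-1, 0, -1], matching starts_infer
# in TestSliceOp_starts_ListTensor below:
def _mask_unknown_dims(values, unknown_positions):
    # -1 marks a start/end whose value is only known at runtime.
    return [-1 if i in unknown_positions else v for i, v in enumerate(values)]

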
class TestSliceOp_starts_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()

        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(("x" + str(index), np.ones(
                (1)).astype('int64') * ele))

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, 1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.starts_infer = [-1, 0, -1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 2: starts (list containing tensors), ends (plain list)
# with the decrease_axis attr
class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()

        starts_tensor = []
        for index, ele in enumerate(self.starts):
            starts_tensor.append(("x" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}

        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts_infer,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [1, -1, 1]
        self.out = self.input[1, 0:3, 2:4, :]

        self.starts_infer = [1, -1, 2]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


class TestSliceOp_decs_dim_5_starts_ListTensor(
        TestSliceOp_decs_dim_starts_ListTensor):
    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [-1]
        self.ends = [1000000]
        self.axes = [3]
        self.decrease_axis = [3]
        self.infer_flags = [-1]
        self.out = self.input[:, :, :, -1]

        self.starts_infer = [-1]


# Situation 3: starts (single tensor), ends (plain list)
# with the decrease_axis attr
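#
# With a whole StartsTensor input the 'starts' attr is omitted entirely
# (note it stays commented out in attrs below) and every infer_flag is -1,
# since no start value is known at compile time.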
class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(
                self.starts, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            #'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 3, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 4: starts (single tensor), ends (single tensor)
# without the decrease_axis attr
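#
# Note the deliberately mixed index dtypes below: StartsTensor is fed as
# int64 while EndsTensor is int32, so both index dtypes are exercised.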
class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(
                self.starts, dtype="int64"),
            "EndsTensor": np.array(
                self.ends, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            #'starts': self.starts,
            #'ends': self.ends_infer,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 5: starts (single tensor), ends (single tensor)
# with the decrease_axis attr
class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(
                self.starts, dtype="int32"),
            "EndsTensor": np.array(
                self.ends, dtype="int32")
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            #'starts': self.starts,
            #'ends': self.ends,
            'infer_flags': self.infer_flags,
            'decrease_axis': self.decrease_axis,
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [2, 1, 4]
        self.axes = [0, 1, 2]
        self.decrease_axis = [0, 1]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1, 0, 2:4, :]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Situation 6: starts (single tensor), ends (list containing tensors)
# without the decrease_axis attr
class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()

        ends_tensor = []
        for index, ele in enumerate(self.ends):
            ends_tensor.append(("y" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {
            'Input': self.input,
            "StartsTensor": np.array(
                self.starts, dtype="int32"),
            'EndsTensorList': ends_tensor
        }
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            #'starts': self.starts,
            'ends': self.ends_infer,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.input = np.random.random([3, 4, 5, 6]).astype("float64")
        self.starts = [1, 0, 2]
        self.ends = [3, 3, 4]
        self.axes = [0, 1, 2]
        self.infer_flags = [-1, -1, -1]
        self.out = self.input[1:3, 0:3, 2:4, :]

        self.ends_infer = [-1, 3, 4]

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)


# Test CUDA float16
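# Both fp16 tests guard twice: the class-level skipIf requires a CUDA build,
# and each test method also checks is_float16_supported(place) at runtime
# before running on CUDAPlace(0).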
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
        self.starts = [-3, 0, 2]
        self.ends = [3, 100, -1]
        self.axes = [0, 1, 3]
        self.out = self.input[-3:3, 0:100, :, 2:-1]
        self.infer_flags = [1, 1, 1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-5)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Input'], 'Out', max_relative_error=0.006)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFP16_2(OpTest):
    def setUp(self):
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
        self.outputs = {'Out': self.out}
        self.attrs = {
            'axes': self.axes,
            'starts': self.starts,
            'ends': self.ends,
            'infer_flags': self.infer_flags
        }

    def config(self):
        self.dtype = "float16"
        self.input = np.random.random([3, 4, 10]).astype(self.dtype)
        self.starts = [0]
        self.ends = [1]
        self.axes = [1]
        self.out = self.input[:, 0:1, :]
        self.infer_flags = [1]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_output_with_place(place, atol=1e-5)

    def test_check_grad_normal(self):
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            self.check_grad_with_place(
                place, ['Input'],
                'Out',
                max_relative_error=0.006,
                numeric_grad_delta=0.5)


# Test the Python API
class TestSliceAPI(unittest.TestCase):
    def test_1(self):
        input = np.random.random([3, 4, 5, 6]).astype("float64")
        minus_1 = fluid.layers.fill_constant([1], "int32", -1)
        minus_3 = fluid.layers.fill_constant([1], "int64", -3)
        starts = fluid.layers.data(
            name='starts', shape=[1, 3], append_batch_size=False)
        ends = fluid.layers.data(
            name='ends', shape=[3], append_batch_size=False)

        x = fluid.layers.data(
            name="x",
            shape=[3, 4, 5, 6],
            append_batch_size=False,
            dtype="float64")

        # value_int64 is greater than 2147483647, the int32 maximum
        value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)

        out_1 = fluid.layers.slice(
            x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
        out_2 = fluid.layers.slice(
            x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
        out_3 = fluid.layers.slice(
            x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
        out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)

        out_5 = x[-3:3, 0:100, 2:-1]
        out_6 = x[minus_3:3, 0:100, :, 2:-1]
        out_7 = x[minus_1, 0:100, :, 2:minus_1]

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
            fluid.default_main_program(),
            feed={
                "x": input,
                'starts': np.array([-3, 0, 2]).astype("int32"),
                'ends': np.array([3, 100, -1]).astype("int32")
            },
            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])

        assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
        assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
        assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])


class TestSliceApiWithTensor(unittest.TestCase):
    def test_starts_ends_is_tensor(self):
        with paddle.fluid.dygraph.guard():
            a = paddle.rand(shape=[4, 5, 6], dtype='float32')
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            a_1 = paddle.slice(
                a,
                axes=axes,
                starts=paddle.to_tensor(
                    starts, dtype='int32'),
                ends=paddle.to_tensor(
                    ends, dtype='int32'))
            a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)

            self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))


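# Slicing a LoDTensorArray: arr[0] yields a single LoDTensor (case 1), while
# arr[start:end] yields a sub-array that tensor_array_to_tensor stacks back
# into one tensor (case 2; case 3 does the same with an int64 end far past
# the array length, which behaves like clipping to the whole array).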
class TestSliceApiWithLoDTensorArray(unittest.TestCase):
    def setUp(self):
        self.shape = (3, 4)
        self.data = np.random.random(size=self.shape).astype('float32')
        self.idx = 0
        self.start = 0
        self.end = 2
        self.axis = 1

        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)

    def set_program_and_run(self, main_program, case_num):
        with fluid.program_guard(main_program):
            x = [
                fluid.data(
                    name='x0', shape=self.shape, dtype="float32"),
                fluid.data(
                    name='x1', shape=self.shape, dtype="float32"),
                fluid.data(
                    name='x2', shape=self.shape, dtype="float32")
            ]

            for each_x in x:
                each_x.stop_gradient = False

            arr = layers.create_array(dtype="float32")
            for i in range(3):
                idx = layers.array_length(arr)
                arr = layers.array_write(x=x[i], i=idx, array=arr)

            if case_num == 1:
                self.sliced_arr = output = arr[0]

            elif case_num == 2:
                end = fluid.layers.array_length(
                    arr) - 1  # dtype of end is int64
                self.sliced_arr = slice_arr = arr[self.start:end]
                output, _ = fluid.layers.tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True)
            elif case_num == 3:
                value_int64 = fluid.layers.fill_constant([1], "int64",
                                                         2147483648)
                self.sliced_arr = slice_arr = arr[self.start:value_int64]
                output, _ = fluid.layers.tensor_array_to_tensor(
                    slice_arr, axis=self.axis, use_stack=True)

            loss = fluid.layers.reduce_sum(output)
            fluid.backward.append_backward(loss)
            g_vars = list(
                map(main_program.global_block().var,
                    [each_x.name + "@GRAD" for each_x in x]))
            self.out, self.g_x0, self.g_x1, self.g_x2 = \
                self.exe.run(main_program,
                             feed={'x0': self.data,
                                   'x1': self.data,
                                   'x2': self.data},
                             fetch_list=[output] + g_vars)

    def test_case_1(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 1)

        self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(np.array_equal(self.out, self.data))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))

    def test_case_2(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 2)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(
                self.out, np.stack(
                    [self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))

    def test_case_3(self):
        main_program = fluid.Program()
        self.set_program_and_run(main_program, 3)

        self.assertTrue(
            self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
        self.assertEqual(self.sliced_arr.shape, self.shape)
        self.assertTrue(
            np.array_equal(
                self.out,
                np.stack(
                    [self.data, self.data, self.data], axis=self.axis)))
        self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
        self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data)))


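# __getitem__ on a dygraph VarBase: integer indices and bounds (including
# values read back from var.shape) are accepted, while a float anywhere in
# the index must raise.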
class TestImperativeVarBaseGetItem(unittest.TestCase):
    def test_getitem_with_long(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = fluid.dygraph.to_variable(data)
            sliced = var[:, 10:, :var.shape[1]]  # var.shape[1] is 80L here
            self.assertEqual(sliced.shape, [2, 70, 80])

            sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
            self.assertEqual(sliced.shape, [2, 78, 78])

    def test_getitem_with_float(self):
        def test_float_in_slice_item():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[:, 1.1:, :var.shape[1]]

        self.assertRaises(Exception, test_float_in_slice_item)

        def test_float_in_index():
            with fluid.dygraph.guard():
                data = np.random.random((2, 80, 16128)).astype('float32')
                var = fluid.dygraph.to_variable(data)
                sliced = var[1.1]

        self.assertRaises(Exception, test_float_in_index)


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestImperativeCUDAPinnedInput(unittest.TestCase):
    def test_input_cuda_pinned_var(self):
        with fluid.dygraph.guard():
            data = np.random.random((2, 80, 16128)).astype('float32')
            var = core.VarBase(
                value=data,
                name='',
                persistable=False,
                place=fluid.CUDAPinnedPlace(),
                zero_copy=False)
            sliced = var[:, 10:, :var.shape[1]]
            self.assertEqual(sliced.shape, [2, 70, 80])


if __name__ == '__main__':
    unittest.main()