diff --git a/paddle/fluid/operators/slice_op_npu.cc b/paddle/fluid/operators/slice_op_npu.cc
index 8e0d4b4a019921661e1a3da78ed3f69ef20356ec..9e6e6f04edbbfd6784ca5d79e126047de0ef9430 100644
--- a/paddle/fluid/operators/slice_op_npu.cc
+++ b/paddle/fluid/operators/slice_op_npu.cc
@@ -25,15 +25,16 @@ namespace operators {
 
 using Tensor = framework::Tensor;
 
-void UpdateAttr(const framework::DDim in_dims, const std::vector<int> axes,
+void UpdateAttr(const framework::DDim& in_dims, const std::vector<int> axes,
                 const std::vector<int> starts, const std::vector<int> ends,
                 std::vector<int>* offsets, std::vector<int>* size) {
   int cnt = 0;
   for (int i = 0; i < in_dims.size(); ++i) {
     int start = 0;
     int end = in_dims[i];
-    int axis = axes[cnt];
-
+    // NOTE(zhiqiu): Be careful: cnt may exceed axes.size(), which would
+    // result in an out-of-bounds read.
+    int axis = cnt < static_cast<int>(axes.size()) ? axes[cnt] : -1;
     if (axis == i) {
       start = starts[cnt];
       if (start < 0) {
@@ -63,10 +64,10 @@ class SliceNPUKernel : public framework::OpKernel<T> {
     auto axes = ctx.Attr<std::vector<int>>("axes");
     auto starts = ctx.Attr<std::vector<int>>("starts");
     auto ends = ctx.Attr<std::vector<int>>("ends");
+    const auto& in_dims = input->dims();
 
     out->mutable_data<T>(ctx.GetPlace());
 
-    auto in_dims = input->dims();
     std::vector<int> offsets(in_dims.size());
     std::vector<int> size(in_dims.size());
 
@@ -93,8 +94,7 @@ class SliceGradNPUKernel : public framework::OpKernel<T> {
     auto axes = ctx.Attr<std::vector<int>>("axes");
     auto starts = ctx.Attr<std::vector<int>>("starts");
     auto ends = ctx.Attr<std::vector<int>>("ends");
-
-    auto in_dims = input->dims();
+    const auto& in_dims = input->dims();
 
     int rank = in_dims.size();
     std::vector<int> offsets(rank);
diff --git a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
index 500618f509f682b00be715ea8214cddaaf892b2e..c57758dca8e5caa8ce5293af0fe4412b6d0a40c3 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_slice_op_npu.py
@@ -71,12 +71,12 @@ class TestSliceOp(OpTest):
 
 class TestSliceOp2(TestSliceOp):
     def config(self):
-        self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
-        self.starts = [1, 0, -3]
-        self.ends = [3, 3, -1]
-        self.axes = [0, 1, 2]
-        self.infer_flags = [1, 1, 1]
-        self.out = self.input[1:3, 0:3, -3:-1, :]
+        self.input = np.random.random([10, 5, 6]).astype(self.dtype)
+        self.starts = [0]
+        self.ends = [1]
+        self.axes = [1]
+        self.infer_flags = [1]
+        self.out = self.input[:, 0:1, :]
 
 
 @unittest.skipIf(not paddle.is_compiled_with_npu(),
@@ -118,8 +118,8 @@ class TestSliceNet(unittest.TestCase):
 
             prediction = paddle.static.nn.fc(z, size=2, activation='softmax')
 
-            cost = paddle.nn.functional.cross_entropy(
-                input=prediction, label=label)
+            cost = paddle.fluid.layers.softmax_with_cross_entropy(
+                logits=prediction, label=label)
             loss = paddle.mean(cost)
             sgd = paddle.optimizer.SGD(learning_rate=0.01)
             sgd.minimize(loss)
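
Illustration (not part of the patch): a minimal standalone sketch of why the bounds check on cnt is needed. It is not the Paddle kernel itself; UpdateAttrSketch is a hypothetical name, framework::DDim is replaced by a plain std::vector<int64_t>, and the clamping of out-of-range starts/ends is omitted. The loop visits every input dimension, but cnt only advances on dimensions listed in axes, so once all sliced axes have been consumed, axes[cnt] would read past the end of the vector.

    // Standalone sketch of the guarded UpdateAttr logic (simplified).
    #include <cstdint>
    #include <iostream>
    #include <vector>

    void UpdateAttrSketch(const std::vector<int64_t>& in_dims,
                          const std::vector<int>& axes,
                          const std::vector<int>& starts,
                          const std::vector<int>& ends,
                          std::vector<int>* offsets, std::vector<int>* size) {
      int cnt = 0;
      for (size_t i = 0; i < in_dims.size(); ++i) {
        int start = 0;
        int end = static_cast<int>(in_dims[i]);
        // Without this guard, once every sliced axis has been consumed
        // (cnt == axes.size()), axes[cnt] reads past the end of the vector.
        int axis = cnt < static_cast<int>(axes.size()) ? axes[cnt] : -1;
        if (axis == static_cast<int>(i)) {
          start = starts[cnt];
          if (start < 0) start += static_cast<int>(in_dims[i]);
          end = ends[cnt];
          if (end < 0) end += static_cast<int>(in_dims[i]);
          cnt++;
        }
        (*offsets)[i] = start;
        (*size)[i] = end - start;
      }
    }

    int main() {
      // Mirrors the new TestSliceOp2 case: shape [10, 5, 6], slice axis 1, [0:1).
      std::vector<int64_t> in_dims = {10, 5, 6};
      std::vector<int> axes = {1}, starts = {0}, ends = {1};
      std::vector<int> offsets(in_dims.size()), size(in_dims.size());
      UpdateAttrSketch(in_dims, axes, starts, ends, &offsets, &size);
      for (size_t i = 0; i < in_dims.size(); ++i) {
        std::cout << "dim " << i << ": offset=" << offsets[i]
                  << " size=" << size[i] << "\n";
      }
      return 0;
    }

Built with any C++11 compiler, this prints offset/size pairs (0, 10), (0, 1), (0, 6) for the [10, 5, 6] input sliced as [:, 0:1, :], matching the new TestSliceOp2 case; without the guard, the i == 2 iteration would read axes[1] out of bounds, which is the failure mode the NOTE in the patch describes.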