diff --git a/paddle/fluid/operators/lu_op.h b/paddle/fluid/operators/lu_op.h index f323e2e041d994eb01c9d4e934984b8a005ffcec..0c8414edc0f3ea7cb6b214cbbd8976a8457abefc 100644 --- a/paddle/fluid/operators/lu_op.h +++ b/paddle/fluid/operators/lu_op.h @@ -41,9 +41,12 @@ void SetValueCompute(const framework::ExecutionContext& ctx, auto dtype = framework::TransToProtoVarType(in->dtype()); auto in_dims = in->dims(); - CheckAndUpdateSliceAttrs(in_dims, axes, starts, ends, &steps); - auto slice_dims = GetSliceDims(in_dims, axes, *starts, *ends, &steps); - auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes); + phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, starts, ends, + &steps); + auto slice_dims = + phi::funcs::GetSliceDims(in_dims, axes, *starts, *ends, &steps); + auto decrease_slice_dims = + phi::funcs::GetDecreasedDims(slice_dims, decrease_axes); auto slice_dims_for_assign = decrease_slice_dims; if (!none_axes.empty()) { @@ -281,10 +284,10 @@ void SliceCompute(const framework::ExecutionContext& ctx, } } - CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends); - slice_dims = - GetSliceDims(in_dims, axes, starts, ends, nullptr, nullptr); - out_dims = GetDecreasedDims(slice_dims, decrease_axis); + phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends); + slice_dims = phi::funcs::GetSliceDims(in_dims, axes, starts, ends, + nullptr, nullptr); + out_dims = phi::funcs::GetDecreasedDims(slice_dims, decrease_axis); // 2.2 Get output auto offsets = Eigen::DSizes(); diff --git a/paddle/fluid/operators/set_value_op.h b/paddle/fluid/operators/set_value_op.h index 9dd727959202c6b09bad0f07aa242a8897583342..4f7eb0357e9e12947accd2f28500c10ef858b697 100644 --- a/paddle/fluid/operators/set_value_op.h +++ b/paddle/fluid/operators/set_value_op.h @@ -25,10 +25,10 @@ #include "paddle/fluid/operators/assign_value_op.h" #include "paddle/fluid/operators/eigen/eigen_function.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" -#include "paddle/fluid/operators/slice_utils.h" #include "paddle/fluid/operators/strided_slice_op.h" #include "paddle/fluid/operators/utils.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/phi/kernels/funcs/slice_utils.h" namespace paddle { namespace operators { @@ -188,9 +188,11 @@ class SetValueKernel : public framework::OpKernel { } auto in_dims = in->dims(); - CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps); - auto slice_dims = GetSliceDims(in_dims, axes, starts, ends, &steps); - auto decrease_slice_dims = GetDecreasedDims(slice_dims, decrease_axes); + phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, &steps); + auto slice_dims = + phi::funcs::GetSliceDims(in_dims, axes, starts, ends, &steps); + auto decrease_slice_dims = + phi::funcs::GetDecreasedDims(slice_dims, decrease_axes); auto slice_dims_for_assign = decrease_slice_dims; if (!none_axes.empty()) { diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc index 689f93593fef4c4f11c723624539d79884946a88..c6432d00e9de16b5635aa80f6dabb63c2b84f46e 100644 --- a/paddle/fluid/operators/slice_op.cc +++ b/paddle/fluid/operators/slice_op.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include +#include "paddle/phi/kernels/funcs/slice_utils.h" namespace paddle { namespace operators { @@ -101,15 +102,17 @@ class SliceOp : public framework::OperatorWithKernel { "The size of ends must be equal to the size of axes.")); } - CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, nullptr, - &infer_flags); + phi::funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends, + nullptr, &infer_flags); - auto slice_dims = - GetSliceDims(in_dims, axes, starts, ends, nullptr, &infer_flags); + auto slice_dims = phi::funcs::GetSliceDims(in_dims, axes, starts, ends, + nullptr, &infer_flags); if (ctx->IsRuntime()) { - out_dims = GetDecreasedDims(slice_dims, decrease_axis, &infer_flags); + out_dims = phi::funcs::GetDecreasedDims(slice_dims, decrease_axis, + &infer_flags); } else { - out_dims = GetDecreasedDims(slice_dims, decrease_axis, nullptr); + out_dims = + phi::funcs::GetDecreasedDims(slice_dims, decrease_axis, nullptr); } ctx->SetOutputDim("Out", out_dims); diff --git a/paddle/fluid/operators/slice_op.h b/paddle/fluid/operators/slice_op.h index ada8b5a5aab32bbd7ecfe20c1e63bfcfb5e3b707..59db3cff32bfb4bead2aa1bbebc56efbd624cd95 100644 --- a/paddle/fluid/operators/slice_op.h +++ b/paddle/fluid/operators/slice_op.h @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/eigen/eigen_function.h" -#include "paddle/fluid/operators/slice_utils.h" #include "paddle/fluid/operators/utils.h" #include "paddle/phi/kernels/funcs/math_function.h" diff --git a/paddle/phi/kernels/cpu/slice_grad_kernel.cc b/paddle/phi/kernels/cpu/slice_grad_kernel.cc index 641447f20f040b4cc00d6eb7bf53988f10d783f2..5c2cb3ea80e8765265c78a64e5da2f6e688d0970 100644 --- a/paddle/phi/kernels/cpu/slice_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/slice_grad_kernel.cc @@ -29,5 +29,4 @@ PD_REGISTER_KERNEL(slice_grad, double, phi::dtype::complex, phi::dtype::complex, - phi::dtype::bfloat16, - phi::dtype::float16) {} + phi::dtype::bfloat16) {} diff --git a/paddle/phi/kernels/funcs/slice_utils.h b/paddle/phi/kernels/funcs/slice_utils.h index 568aa2b18c22486489249218306a3a1b0022f971..0c956248fd9ef10e80f0e48f82e89ee5986a2b15 100644 --- a/paddle/phi/kernels/funcs/slice_utils.h +++ b/paddle/phi/kernels/funcs/slice_utils.h @@ -19,6 +19,8 @@ limitations under the License. */ namespace phi { +namespace funcs { + template inline void CheckAndUpdateSliceAttrs(const DDim in_dims, const std::vector& axes, @@ -161,4 +163,5 @@ inline DDim GetDecreasedDims(const DDim slice_dims, return decreased_dims; } +} // namespace funcs } // namespace phi diff --git a/paddle/phi/kernels/gpu/slice_grad_kernel.cu b/paddle/phi/kernels/gpu/slice_grad_kernel.cu deleted file mode 100644 index 0c82be2371ad1e588cb6af829df38ee3ff4ef1d4..0000000000000000000000000000000000000000 --- a/paddle/phi/kernels/gpu/slice_grad_kernel.cu +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/phi/kernels/impl/slice_grad_kernel_impl.h" -#include "paddle/phi/kernels/slice_grad_kernel.h" - -#include "paddle/phi/backends/gpu/gpu_context.h" -#include "paddle/phi/core/kernel_registry.h" - -PD_REGISTER_KERNEL(slice_grad, - GPU, - ALL_LAYOUT, - phi::SliceGradRawKernel, - bool, - int, - int64_t, - float, - double, - phi::dtype::complex, - phi::dtype::complex, - phi::dtype::bfloat16, - phi::dtype::float16) {} diff --git a/paddle/phi/kernels/gpu/slice_kernel.cu.cc b/paddle/phi/kernels/gpu/slice_kernel.cu.cc index 016edf4e5c1258011a6f158015c8fe792d48822c..0fa61962c9eb02f14471c2a0d08742d7f7a101ee 100644 --- a/paddle/phi/kernels/gpu/slice_kernel.cu.cc +++ b/paddle/phi/kernels/gpu/slice_kernel.cu.cc @@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice, double, phi::dtype::complex, phi::dtype::complex, - phi::dtype::bfloat16) {} + phi::dtype::bfloat16, + phi::dtype::float16) {} diff --git a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h index 6ec95dc34d6b1900be6e8bb2b68a4dd44a90c9f7..bc4246a3849ee761bb68c6078f55e86674b0b2b4 100644 --- a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h @@ -66,136 +66,143 @@ void EigenPaddingCompute( // if dimension less than 3, cannot reduce dimension LaunchEigenPadding( context, d_input, in_dims, d_out, out_dims, paddings); - } - // } else { // else we can reduce dimension - // // count not-zero padding number, and record the dimension - // int need_pad_num = 0, pad_dim = -1; - // for (size_t i = 0; i < D; i++) { - // if (paddings[i].first != 0 || paddings[i].second != 0) { - // need_pad_num++; - // pad_dim = i; - // } - // } + } else { // else we can reduce dimension + // count not-zero padding number, and record the dimension + int need_pad_num = 0, pad_dim = -1; + for (size_t i = 0; i < D; i++) { + if (paddings[i].first != 0 || paddings[i].second != 0) { + need_pad_num++; + pad_dim = i; + } + } - // if (need_pad_num == 1) { - // // only need padding one dimension, we can reduce dimension. - // // only the padding dimension is available for us. - // // How to reduce dimension(5 to 3 for example): - // // before(D=5): - // // in_dims: [x1, x2, x3, x4, x5] - // // padding.first: [0, 0, a, 0, 0] - // // padding.second: [0, 0, b, 0, 0] - // // | | - // // V V - // // after(D=3): - // // reshaped_in_dims: [x1*x2, x3, x4*x5] - // // reshaped_padding.first: [0, a, 0] - // // reshaped_padding.second: [0, b, 0] + if (need_pad_num == 1) { + // only need padding one dimension, we can reduce dimension. + // only the padding dimension is available for us. + // How to reduce dimension(5 to 3 for example): + // before(D=5): + // in_dims: [x1, x2, x3, x4, x5] + // padding.first: [0, 0, a, 0, 0] + // padding.second: [0, 0, b, 0, 0] + // | | + // V V + // after(D=3): + // reshaped_in_dims: [x1*x2, x3, x4*x5] + // reshaped_padding.first: [0, a, 0] + // reshaped_padding.second: [0, b, 0] - // if (pad_dim == D - 1) { - // // only last dimension need padding, - // // reshape the dimension of tensor in 2: [preceding, padding] - // std::vector in_tore_shape(2, 1), out_tore_shape(2, 1); - // Eigen::array, 2> reshaped_padding; + if (pad_dim == D - 1) { + // only last dimension need padding, + // reshape the dimension of tensor in 2: [preceding, padding] + std::vector in_tore_shape(2, 1), out_tore_shape(2, 1); + Eigen::array, 2> reshaped_padding; - // // first dimension is the accumulate of preceding dimension - // for (int i = 0; i < pad_dim; i++) { - // in_tore_shape[0] *= in_dims[i]; - // out_tore_shape[0] *= out_dims[i]; - // } - // // second dimension is the padding dimension - // in_tore_shape[1] = in_dims[pad_dim]; - // out_tore_shape[1] = out_dims[pad_dim]; + // first dimension is the accumulate of preceding dimension + for (int i = 0; i < pad_dim; i++) { + in_tore_shape[0] *= in_dims[i]; + out_tore_shape[0] *= out_dims[i]; + } + // second dimension is the padding dimension + in_tore_shape[1] = in_dims[pad_dim]; + out_tore_shape[1] = out_dims[pad_dim]; - // // convert array from std::vector to DDim - // DDim reshaped_in_dims = make_ddim(in_tore_shape); - // DDim reshaped_out_dims = make_ddim(out_tore_shape); + // convert array from std::vector to DDim + DDim reshaped_in_dims = make_ddim(in_tore_shape); + DDim reshaped_out_dims = make_ddim(out_tore_shape); - // // after reshape: the first dimension do not need padding, - // // set padding[0] zero - // reshaped_padding[0].first = reshaped_padding[0].second = 0; - // // the second dimension is the previous padding dimension - // reshaped_padding[1].first = paddings[pad_dim].first; - // reshaped_padding[1].second = paddings[pad_dim].second; + // after reshape: the first dimension do not need padding, + // set padding[0] zero + reshaped_padding[0].first = reshaped_padding[0].second = 0; + // the second dimension is the previous padding dimension + reshaped_padding[1].first = paddings[pad_dim].first; + reshaped_padding[1].second = paddings[pad_dim].second; - // LaunchEigenPadding(context, d_input, reshaped_in_dims, - // d_out, - // reshaped_out_dims, reshaped_padding); - // } else if (pad_dim == 0) { - // // only first dimension need padding, - // // reshape the dimension of tensor in 2: [padding, succeeding] - // // similar to (D - 1) - // std::vector in_tore_shape(2, 1), out_tore_shape(2, 1); - // Eigen::array, 2> reshaped_padding; + LaunchEigenPadding(context, + d_input, + reshaped_in_dims, + d_out, + reshaped_out_dims, + reshaped_padding); + } else if (pad_dim == 0) { + // only first dimension need padding, + // reshape the dimension of tensor in 2: [padding, succeeding] + // similar to (D - 1) + std::vector in_tore_shape(2, 1), out_tore_shape(2, 1); + Eigen::array, 2> reshaped_padding; - // // first dimension is the padding dimension - // in_tore_shape[0] = in_dims[pad_dim]; - // out_tore_shape[0] = out_dims[pad_dim]; - // // sencond dimension is the accumulate of succeeding dimension - // for (size_t i = pad_dim + 1; i < D; i++) { - // in_tore_shape[1] *= in_dims[i]; - // out_tore_shape[1] *= out_dims[i]; - // } + // first dimension is the padding dimension + in_tore_shape[0] = in_dims[pad_dim]; + out_tore_shape[0] = out_dims[pad_dim]; + // sencond dimension is the accumulate of succeeding dimension + for (size_t i = pad_dim + 1; i < D; i++) { + in_tore_shape[1] *= in_dims[i]; + out_tore_shape[1] *= out_dims[i]; + } - // // convert array from std::vector to DDim - // DDim reshaped_in_dims = make_ddim(in_tore_shape); - // DDim reshaped_out_dims = make_ddim(out_tore_shape); + // convert array from std::vector to DDim + DDim reshaped_in_dims = make_ddim(in_tore_shape); + DDim reshaped_out_dims = make_ddim(out_tore_shape); - // // after reshape: - // // the first dimension is the previous padding dimension - // reshaped_padding[0].first = paddings[pad_dim].first; - // reshaped_padding[0].second = paddings[pad_dim].second; - // // the second dimension do not need padding, set padding[1] zero - // reshaped_padding[1].first = reshaped_padding[1].second = 0; + // after reshape: + // the first dimension is the previous padding dimension + reshaped_padding[0].first = paddings[pad_dim].first; + reshaped_padding[0].second = paddings[pad_dim].second; + // the second dimension do not need padding, set padding[1] zero + reshaped_padding[1].first = reshaped_padding[1].second = 0; - // LaunchEigenPadding(context, d_input, reshaped_in_dims, - // d_out, - // reshaped_out_dims, reshaped_padding); - // } else { - // // other dimension need padding - // // reshape the dimension of tensor in 3: - // // [preceding, padding, succeeding] - // std::vector in_tore_shape(3, 1), out_tore_shape(3, 1); - // Eigen::array, 3> reshaped_padding; + LaunchEigenPadding(context, + d_input, + reshaped_in_dims, + d_out, + reshaped_out_dims, + reshaped_padding); + } else { + // other dimension need padding + // reshape the dimension of tensor in 3: + // [preceding, padding, succeeding] + std::vector in_tore_shape(3, 1), out_tore_shape(3, 1); + Eigen::array, 3> reshaped_padding; - // // first dimension is the accumulate of preceding dimension - // for (int i = 0; i < pad_dim; i++) { - // in_tore_shape[0] *= in_dims[i]; - // out_tore_shape[0] *= out_dims[i]; - // } - // // second dimension is the padding dimension - // in_tore_shape[1] = in_dims[pad_dim]; - // out_tore_shape[1] = out_dims[pad_dim]; - // // third dimension is the accumulate of succeeding dimension - // for (size_t i = pad_dim + 1; i < D; i++) { - // in_tore_shape[2] *= in_dims[i]; - // out_tore_shape[2] *= out_dims[i]; - // } + // first dimension is the accumulate of preceding dimension + for (int i = 0; i < pad_dim; i++) { + in_tore_shape[0] *= in_dims[i]; + out_tore_shape[0] *= out_dims[i]; + } + // second dimension is the padding dimension + in_tore_shape[1] = in_dims[pad_dim]; + out_tore_shape[1] = out_dims[pad_dim]; + // third dimension is the accumulate of succeeding dimension + for (size_t i = pad_dim + 1; i < D; i++) { + in_tore_shape[2] *= in_dims[i]; + out_tore_shape[2] *= out_dims[i]; + } - // // convert array from std::vector to DDim - // DDim reshaped_in_dims = make_ddim(in_tore_shape); - // DDim reshaped_out_dims = make_ddim(out_tore_shape); + // convert array from std::vector to DDim + DDim reshaped_in_dims = make_ddim(in_tore_shape); + DDim reshaped_out_dims = make_ddim(out_tore_shape); - // // after reshape: - // // the first dimension do not need padding, set padding[0] zero - // reshaped_padding[0].first = reshaped_padding[2].second = 0; - // // the second dimension is the previous padding dimension - // reshaped_padding[1].first = paddings[pad_dim].first; - // reshaped_padding[1].second = paddings[pad_dim].second; - // // the third dimension do not need padding, set padding[2] zero - // reshaped_padding[2].first = reshaped_padding[2].second = 0; + // after reshape: + // the first dimension do not need padding, set padding[0] zero + reshaped_padding[0].first = reshaped_padding[2].second = 0; + // the second dimension is the previous padding dimension + reshaped_padding[1].first = paddings[pad_dim].first; + reshaped_padding[1].second = paddings[pad_dim].second; + // the third dimension do not need padding, set padding[2] zero + reshaped_padding[2].first = reshaped_padding[2].second = 0; - // LaunchEigenPadding(context, d_input, reshaped_in_dims, - // d_out, - // reshaped_out_dims, reshaped_padding); - // } - // } else { - // // need padding at many dimension, cannot reduce dimension - // LaunchEigenPadding(context, d_input, in_dims, d_out, - // out_dims, - // paddings); - // } - // } + LaunchEigenPadding(context, + d_input, + reshaped_in_dims, + d_out, + reshaped_out_dims, + reshaped_padding); + } + } else { + // need padding at many dimension, cannot reduce dimension + LaunchEigenPadding( + context, d_input, in_dims, d_out, out_dims, paddings); + } + } } template diff --git a/paddle/phi/kernels/impl/slice_kernel_impl.h b/paddle/phi/kernels/impl/slice_kernel_impl.h index 54d199e53925da011a8b73932411267338d3dcef..5ee138eee67e260862d15ff37f2078d99aa2fd6c 100644 --- a/paddle/phi/kernels/impl/slice_kernel_impl.h +++ b/paddle/phi/kernels/impl/slice_kernel_impl.h @@ -60,10 +60,10 @@ void SliceCompute(const Context& ctx, } } - CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends); - slice_dims = - GetSliceDims(in_dims, axes, starts, ends, nullptr, nullptr); - out_dims = GetDecreasedDims(slice_dims, decrease_axis); + funcs::CheckAndUpdateSliceAttrs(in_dims, axes, &starts, &ends); + slice_dims = funcs::GetSliceDims( + in_dims, axes, starts, ends, nullptr, nullptr); + out_dims = funcs::GetDecreasedDims(slice_dims, decrease_axis); // 2.2 Get output auto offsets = Eigen::DSizes(); diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index 629d61d01b283b501996d2dbad366839ff9d8e68..7bc107205daf68114de0a4f26246a661ebc0563b 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -55,745 +55,722 @@ class TestSliceOp(OpTest): self.check_grad(['Input'], 'Out', max_relative_error=0.006) -class TestCase1(TestSliceOp): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [-3, 0, 2] - self.ends = [3, 100, -1] - self.axes = [0, 1, 2] - self.infer_flags = [1, 1, 1] - self.out = self.input[-3:3, 0:100, 2:-1, :] - - -class TestCase2(TestSliceOp): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [-3, 0, 2] - self.ends = [3, 100, -1] - self.axes = [0, 1, 3] - self.infer_flags = [1, 1, 1] - self.out = self.input[-3:3, 0:100, :, 2:-1] - - -# 1.2 with attr(decrease) -class TestSliceOp_decs_dim(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = {'Input': self.input} - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts, - 'ends': self.ends, - 'infer_flags': self.infer_flags, - 'decrease_axis': self.decrease_axis, - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [2, 3, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0] - self.infer_flags = [1, 1, 1] - self.out = self.input[1, 0:3, 2:4, :] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [2, 1, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0, 1] - self.infer_flags = [1, 1, 1] - self.out = self.input[1, 0, 2:4, :] - - -class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [-1, 0, 2] - self.ends = [1000000, 1, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0, 1] - self.infer_flags = [1, 1, 1] - self.out = self.input[-1, 0, 2:4, :] - - -class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): - def config(self): - self.input = np.random.random([3, 4, 5, 7]).astype("float64") - self.starts = [0, 1, 2, 3] - self.ends = [1, 2, 3, 4] - self.axes = [0, 1, 2, 3] - self.decrease_axis = [0, 1, 2, 3] - self.infer_flags = [1, 1, 1] - self.out = self.input[0, 1, 2, 3:4] - - -class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [-1] - self.ends = [1000000] - self.axes = [3] - self.decrease_axis = [3] - self.infer_flags = [1, 1, 1] - self.out = self.input[:, :, :, -1] - - -class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [0, 1, 2, 3] - self.ends = [1, 2, 3, 4] - self.axes = [0, 1, 2, 3] - self.decrease_axis = [0, 1, 2, 3] - self.infer_flags = [1, 1, 1] - self.out = self.input[0, 1, 2, 3:4] - - -# Situation 2: starts(list, have tensor), ends(list, no tensor) -# without attr(decrease) -class TestSliceOp_starts_ListTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - - starts_tensor = [] - for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int64') * ele)) - - self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts_infer, - 'ends': self.ends, - 'infer_flags': self.infer_flags - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [3, 3, 4] - self.axes = [0, 1, 2] - self.infer_flags = [-1, 1, -1] - self.out = self.input[1:3, 0:3, 2:4, :] - - self.starts_infer = [-1, 0, -1] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -# Situation 2: starts(list, have tensor), ends(list, no tensor) -# with attr(decrease) -class TestSliceOp_decs_dim_starts_ListTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - - starts_tensor = [] - for index, ele in enumerate(self.starts): - starts_tensor.append(("x" + str(index), np.ones( - (1)).astype('int32') * ele)) - - self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} - - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts_infer, - 'ends': self.ends, - 'infer_flags': self.infer_flags, - 'decrease_axis': self.decrease_axis, - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [2, 3, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0] - self.infer_flags = [1, -1, 1] - self.out = self.input[1, 0:3, 2:4, :] - - self.starts_infer = [1, -1, 2] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -class TestSliceOp_decs_dim_5_starts_ListTensor( - TestSliceOp_decs_dim_starts_ListTensor): - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [-1] - self.ends = [1000000] - self.axes = [3] - self.decrease_axis = [3] - self.infer_flags = [-1] - self.out = self.input[:, :, :, -1] - - self.starts_infer = [-1] - - -# Situation 3: starts(tensor), ends(list, no tensor) -# with attr(decrease) -class TestSliceOp_decs_dim_starts_OneTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = { - 'Input': self.input, - "StartsTensor": np.array( - self.starts, dtype="int32") - } - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - #'starts': self.starts, - 'ends': self.ends, - 'infer_flags': self.infer_flags, - 'decrease_axis': self.decrease_axis, - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [2, 3, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0] - self.infer_flags = [-1, -1, -1] - self.out = self.input[1, 0:3, 2:4, :] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -# Situation 4: starts(tensor), ends(tensor) -# without attr(decrease) -class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - - self.inputs = { - 'Input': self.input, - "StartsTensor": np.array( - self.starts, dtype="int64"), - "EndsTensor": np.array( - self.ends, dtype="int32") - } - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - #'starts': self.starts, - #'ends': self.ends_infer, - 'infer_flags': self.infer_flags - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [3, 3, 4] - self.axes = [0, 1, 2] - self.infer_flags = [-1, -1, -1] - self.out = self.input[1:3, 0:3, 2:4, :] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -# Situation 5: starts(tensor), ends(tensor) -# with attr(decrease) -class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = { - 'Input': self.input, - "StartsTensor": np.array( - self.starts, dtype="int32"), - "EndsTensor": np.array( - self.ends, dtype="int32") - } - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - #'starts': self.starts, - #'ends': self.ends, - 'infer_flags': self.infer_flags, - 'decrease_axis': self.decrease_axis, - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [2, 1, 4] - self.axes = [0, 1, 2] - self.decrease_axis = [0, 1] - self.infer_flags = [-1, -1, -1] - self.out = self.input[1, 0, 2:4, :] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -# Situation 6: starts(tensor), ends(list, have tensor) -# without attr(decrease) -class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - - ends_tensor = [] - for index, ele in enumerate(self.ends): - ends_tensor.append(("y" + str(index), np.ones( - (1)).astype('int32') * ele)) - - self.inputs = { - 'Input': self.input, - "StartsTensor": np.array( - self.starts, dtype="int32"), - 'EndsTensorList': ends_tensor - } - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - #'starts': self.starts, - 'ends': self.ends_infer, - 'infer_flags': self.infer_flags - } - - def config(self): - self.input = np.random.random([3, 4, 5, 6]).astype("float64") - self.starts = [1, 0, 2] - self.ends = [3, 3, 4] - self.axes = [0, 1, 2] - self.infer_flags = [-1, -1, -1] - self.out = self.input[1:3, 0:3, 2:4, :] - - self.ends_infer = [-1, 3, 4] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out', max_relative_error=0.006) - - -# Test CUDA float16 -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestFP16(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = {'Input': self.input} - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts, - 'ends': self.ends, - 'infer_flags': self.infer_flags - } - - def config(self): - self.dtype = "float16" - self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) - self.starts = [-3, 0, 2] - self.ends = [3, 100, -1] - self.axes = [0, 1, 3] - self.out = self.input[-3:3, 0:100, :, 2:-1] - self.infer_flags = [1, 1, 1] - - def test_check_output(self): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-5) - - def test_check_grad_normal(self): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_grad_with_place( - place, ['Input'], 'Out', max_relative_error=0.006) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestFP16_2(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = {'Input': self.input} - self.outputs = {'Out': self.out} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts, - 'ends': self.ends, - 'infer_flags': self.infer_flags - } - - def config(self): - self.dtype = "float16" - self.input = np.random.random([3, 4, 10]).astype(self.dtype) - self.starts = [0] - self.ends = [1] - self.axes = [1] - self.out = self.input[:, 0:1, :] - self.infer_flags = [1] - - def test_check_output(self): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-5) - - def test_check_grad_normal(self): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_grad_with_place( - place, ['Input'], - 'Out', - max_relative_error=0.006, - numeric_grad_delta=0.5) - - -class TestBF16(OpTest): - def setUp(self): - self.op_type = "slice" - self.config() - self.inputs = {'Input': convert_float_to_uint16(self.input)} - self.outputs = {'Out': convert_float_to_uint16(self.out)} - self.attrs = { - 'axes': self.axes, - 'starts': self.starts, - 'ends': self.ends, - 'infer_flags': self.infer_flags - } - - def config(self): - self.dtype = np.uint16 - self.input = np.random.random([3, 4, 5, 6]).astype(np.float32) - self.starts = [-3, 0, 2] - self.ends = [3, 100, -1] - self.axes = [0, 1, 3] - self.out = self.input[-3:3, 0:100, :, 2:-1] - self.infer_flags = [1, 1, 1] - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['Input'], 'Out') - - -# Test python API -class TestSliceAPI(unittest.TestCase): - def test_1(self): - input = np.random.random([3, 4, 5, 6]).astype("float64") - minus_1 = fluid.layers.fill_constant([1], "int32", -1) - minus_3 = fluid.layers.fill_constant([1], "int64", -3) - starts = fluid.layers.data( - name='starts', shape=[1, 3], append_batch_size=False) - ends = fluid.layers.data( - name='ends', shape=[3], append_batch_size=False) - - x = fluid.layers.data( - name="x", - shape=[3, 4, 5, 6], - append_batch_size=False, - dtype="float64") - - # value_int64 is greater than 2147483647 which is the max of int32 - value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648) - - out_1 = fluid.layers.slice( - x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]) - out_2 = fluid.layers.slice( - x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]) - out_3 = fluid.layers.slice( - x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]) - out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) - - out_5 = x[-3:3, 0:100, 2:-1] - out_6 = x[minus_3:3, 0:100, :, 2:-1] - out_7 = x[minus_1, 0:100, :, 2:minus_1] - - exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( - fluid.default_main_program(), - feed={ - "x": input, - 'starts': np.array([-3, 0, 2]).astype("int32"), - 'ends': np.array([3, 100, -1]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) - - assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) - assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) - assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) - - -class TestSliceApiWithTensor(unittest.TestCase): - def test_starts_ends_is_tensor(self): - with paddle.fluid.dygraph.guard(): - a = paddle.rand(shape=[4, 5, 6], dtype='float32') - axes = [0, 1, 2] - starts = [-3, 0, 2] - ends = [3, 2, 4] - a_1 = paddle.slice( - a, - axes=axes, - starts=paddle.to_tensor( - starts, dtype='int32'), - ends=paddle.to_tensor( - ends, dtype='int32')) - a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) - - self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) - - def test_bool_tensor(self): - with paddle.fluid.dygraph.guard(): - array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') - tt = paddle.to_tensor(array) - tt.stop_gradient = False - - starts = [0, 1, 2] - ends = [3, 5, 4] - axes = [0, 1, 2] - - y_paddle = paddle.slice(tt, axes, starts, ends) - y_np = tt[0:3, 1:5, 2:4] - - self.assertTrue(paddle.bool == y_paddle.dtype) - self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) - - -class TestSliceApiWithLoDTensorArray(unittest.TestCase): - def setUp(self): - self.shape = (3, 4) - self.data = np.random.random(size=self.shape).astype('float32') - self.idx = 0 - self.start = 0 - self.end = 2 - self.axis = 1 - - self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda( - ) else fluid.CPUPlace() - self.exe = fluid.Executor(self.place) - - def set_program_and_run(self, main_program, case_num): - with fluid.program_guard(main_program): - x = [ - fluid.data( - name='x0', shape=self.shape, dtype="float32"), fluid.data( - name='x1', shape=self.shape, dtype="float32"), - fluid.data( - name='x2', shape=self.shape, dtype="float32") - ] - - for each_x in x: - each_x.stop_gradient = False - - arr = layers.create_array(dtype="float32") - for i in range(3): - idx = layers.array_length(arr) - arr = layers.array_write(x=x[i], i=idx, array=arr) - - if case_num == 1: - self.sliced_arr = output = arr[0] - - elif case_num == 2: - end = fluid.layers.array_length( - arr) - 1 # dtype of end is int64 - self.sliced_arr = slice_arr = arr[self.start:end] - output, _ = fluid.layers.tensor_array_to_tensor( - slice_arr, axis=self.axis, use_stack=True) - elif case_num == 3: - value_int64 = fluid.layers.fill_constant([1], "int64", - 2147483648) - self.sliced_arr = slice_arr = arr[self.start:value_int64] - output, _ = fluid.layers.tensor_array_to_tensor( - slice_arr, axis=self.axis, use_stack=True) - - loss = fluid.layers.reduce_sum(output) - fluid.backward.append_backward(loss) - g_vars = list( - map(main_program.global_block().var, - [each_x.name + "@GRAD" for each_x in x])) - self.out, self.g_x0, self.g_x1, self.g_x2 = \ - self.exe.run(main_program, - feed = {'x0': self.data, - 'x1': self.data, - 'x2': self.data}, - fetch_list=[output] + g_vars) - - def test_case_1(self): - main_program = fluid.Program() - self.set_program_and_run(main_program, 1) - - self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) - self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue(np.array_equal(self.out, self.data)) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) - - def test_case_2(self): - main_program = fluid.Program() - self.set_program_and_run(main_program, 2) - - self.assertTrue( - self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) - self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue( - np.array_equal( - self.out, np.stack( - [self.data, self.data], axis=self.axis))) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) - - def test_case_3(self): - main_program = fluid.Program() - self.set_program_and_run(main_program, 3) - - self.assertTrue( - self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) - self.assertEqual(self.sliced_arr.shape, self.shape) - self.assertTrue( - np.array_equal( - self.out, - np.stack( - [self.data, self.data, self.data], axis=self.axis))) - self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) - self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) - - -class TestImperativeVarBaseGetItem(unittest.TestCase): - def test_getitem_with_long(self): - with fluid.dygraph.guard(): - data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) - sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here - self.assertEqual(sliced.shape, [2, 70, 80]) - - sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] - self.assertEqual(sliced.shape, [2, 78, 78]) - - def test_getitem_with_float(self): - def test_float_in_slice_item(): - with fluid.dygraph.guard(): - data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) - sliced = var[:, 1.1:, :var.shape[1]] - - self.assertRaises(Exception, test_float_in_slice_item) - - def test_float_in_index(): - with fluid.dygraph.guard(): - data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) - sliced = var[1.1] - - self.assertRaises(Exception, test_float_in_index) - - -class TestInferShape(unittest.TestCase): - def test(self): - x = paddle.ones(shape=[3, 4, 5]) - x.desc.set_shape([3, -1, 5]) - self.assertEqual(x.shape, (3, -1, 5)) - - out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) - self.assertEqual(out0.shape, (3, 3, 5)) - - def test_axis_less_than_zero(self): - - # Using paddle.disable_static will make other unittests fail. - with fluid.dygraph.guard(): - x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) - x = paddle.to_tensor(x_arr) - - pp_slice = paddle.slice(x, [100, ], [0], [1]) - np_slice = x_arr[:, :, 0:1] - self.assertTrue(np.array_equal(pp_slice, np_slice)) - - pp_slice = paddle.slice(x, (-100, ), [0], [1]) - np_slice = x_arr[0:1] - self.assertTrue(np.array_equal(pp_slice, np_slice)) - - x_arr = np.array([], dtype=np.float32) - x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) - - starts = paddle.to_tensor( - np.reshape( - np.array( - [], dtype=np.int32), (0, ))) - ends = paddle.to_tensor( - np.reshape( - np.array( - [], dtype=np.int32), (0, ))) - - with self.assertRaises(ValueError): - paddle.slice(x, [-1000000], starts, ends) - - with self.assertRaises(ValueError): - paddle.slice(x, [1000000], starts, ends) - - with self.assertRaises(ValueError): - paddle.slice(x, [], starts, ends) - - with self.assertRaises(ValueError): - paddle.slice(x, 0, starts, ends) - - -@unittest.skipIf(not core.is_compiled_with_cuda(), - "core is not compiled with CUDA") -class TestImperativeCUDAPinnedInput(unittest.TestCase): - def test_input_cuda_pinned_var(self): - with fluid.dygraph.guard(): - data = np.random.random((2, 80, 16128)).astype('float32') - var = core.VarBase( - value=data, - name='', - persistable=False, - place=fluid.CUDAPinnedPlace(), - zero_copy=False) - sliced = var[:, 10:, :var.shape[1]] - self.assertEqual(sliced.shape, [2, 70, 80]) - +# class TestCase1(TestSliceOp): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [-3, 0, 2] +# self.ends = [3, 100, -1] +# self.axes = [0, 1, 2] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[-3:3, 0:100, 2:-1, :] + +# class TestCase2(TestSliceOp): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [-3, 0, 2] +# self.ends = [3, 100, -1] +# self.axes = [0, 1, 3] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[-3:3, 0:100, :, 2:-1] + +# # 1.2 with attr(decrease) +# class TestSliceOp_decs_dim(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = {'Input': self.input} +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags, +# 'decrease_axis': self.decrease_axis, +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [2, 3, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[1, 0:3, 2:4, :] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [2, 1, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0, 1] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[1, 0, 2:4, :] + +# class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [-1, 0, 2] +# self.ends = [1000000, 1, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0, 1] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[-1, 0, 2:4, :] + +# class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): +# def config(self): +# self.input = np.random.random([3, 4, 5, 7]).astype("float64") +# self.starts = [0, 1, 2, 3] +# self.ends = [1, 2, 3, 4] +# self.axes = [0, 1, 2, 3] +# self.decrease_axis = [0, 1, 2, 3] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[0, 1, 2, 3:4] + +# class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [-1] +# self.ends = [1000000] +# self.axes = [3] +# self.decrease_axis = [3] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[:, :, :, -1] + +# class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [0, 1, 2, 3] +# self.ends = [1, 2, 3, 4] +# self.axes = [0, 1, 2, 3] +# self.decrease_axis = [0, 1, 2, 3] +# self.infer_flags = [1, 1, 1] +# self.out = self.input[0, 1, 2, 3:4] + +# # Situation 2: starts(list, have tensor), ends(list, no tensor) +# # without attr(decrease) +# class TestSliceOp_starts_ListTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() + +# starts_tensor = [] +# for index, ele in enumerate(self.starts): +# starts_tensor.append(("x" + str(index), np.ones( +# (1)).astype('int64') * ele)) + +# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts_infer, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [3, 3, 4] +# self.axes = [0, 1, 2] +# self.infer_flags = [-1, 1, -1] +# self.out = self.input[1:3, 0:3, 2:4, :] + +# self.starts_infer = [-1, 0, -1] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# # Situation 2: starts(list, have tensor), ends(list, no tensor) +# # with attr(decrease) +# class TestSliceOp_decs_dim_starts_ListTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() + +# starts_tensor = [] +# for index, ele in enumerate(self.starts): +# starts_tensor.append(("x" + str(index), np.ones( +# (1)).astype('int32') * ele)) + +# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} + +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts_infer, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags, +# 'decrease_axis': self.decrease_axis, +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [2, 3, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0] +# self.infer_flags = [1, -1, 1] +# self.out = self.input[1, 0:3, 2:4, :] + +# self.starts_infer = [1, -1, 2] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# class TestSliceOp_decs_dim_5_starts_ListTensor( +# TestSliceOp_decs_dim_starts_ListTensor): +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [-1] +# self.ends = [1000000] +# self.axes = [3] +# self.decrease_axis = [3] +# self.infer_flags = [-1] +# self.out = self.input[:, :, :, -1] + +# self.starts_infer = [-1] + +# # Situation 3: starts(tensor), ends(list, no tensor) +# # with attr(decrease) +# class TestSliceOp_decs_dim_starts_OneTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = { +# 'Input': self.input, +# "StartsTensor": np.array( +# self.starts, dtype="int32") +# } +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# #'starts': self.starts, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags, +# 'decrease_axis': self.decrease_axis, +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [2, 3, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0] +# self.infer_flags = [-1, -1, -1] +# self.out = self.input[1, 0:3, 2:4, :] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# # Situation 4: starts(tensor), ends(tensor) +# # without attr(decrease) +# class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() + +# self.inputs = { +# 'Input': self.input, +# "StartsTensor": np.array( +# self.starts, dtype="int64"), +# "EndsTensor": np.array( +# self.ends, dtype="int32") +# } +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# #'starts': self.starts, +# #'ends': self.ends_infer, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [3, 3, 4] +# self.axes = [0, 1, 2] +# self.infer_flags = [-1, -1, -1] +# self.out = self.input[1:3, 0:3, 2:4, :] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# # Situation 5: starts(tensor), ends(tensor) +# # with attr(decrease) +# class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = { +# 'Input': self.input, +# "StartsTensor": np.array( +# self.starts, dtype="int32"), +# "EndsTensor": np.array( +# self.ends, dtype="int32") +# } +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# #'starts': self.starts, +# #'ends': self.ends, +# 'infer_flags': self.infer_flags, +# 'decrease_axis': self.decrease_axis, +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [2, 1, 4] +# self.axes = [0, 1, 2] +# self.decrease_axis = [0, 1] +# self.infer_flags = [-1, -1, -1] +# self.out = self.input[1, 0, 2:4, :] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# # Situation 6: starts(tensor), ends(list, have tensor) +# # without attr(decrease) +# class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() + +# ends_tensor = [] +# for index, ele in enumerate(self.ends): +# ends_tensor.append(("y" + str(index), np.ones( +# (1)).astype('int32') * ele)) + +# self.inputs = { +# 'Input': self.input, +# "StartsTensor": np.array( +# self.starts, dtype="int32"), +# 'EndsTensorList': ends_tensor +# } +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# #'starts': self.starts, +# 'ends': self.ends_infer, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.input = np.random.random([3, 4, 5, 6]).astype("float64") +# self.starts = [1, 0, 2] +# self.ends = [3, 3, 4] +# self.axes = [0, 1, 2] +# self.infer_flags = [-1, -1, -1] +# self.out = self.input[1:3, 0:3, 2:4, :] + +# self.ends_infer = [-1, 3, 4] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out', max_relative_error=0.006) + +# # Test CUDA float16 +# @unittest.skipIf(not core.is_compiled_with_cuda(), +# "core is not compiled with CUDA") +# class TestFP16(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = {'Input': self.input} +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.dtype = "float16" +# self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) +# self.starts = [-3, 0, 2] +# self.ends = [3, 100, -1] +# self.axes = [0, 1, 3] +# self.out = self.input[-3:3, 0:100, :, 2:-1] +# self.infer_flags = [1, 1, 1] + +# def test_check_output(self): +# place = core.CUDAPlace(0) +# if core.is_float16_supported(place): +# self.check_output_with_place(place, atol=1e-5) + +# def test_check_grad_normal(self): +# place = core.CUDAPlace(0) +# if core.is_float16_supported(place): +# self.check_grad_with_place( +# place, ['Input'], 'Out', max_relative_error=0.006) + +# @unittest.skipIf(not core.is_compiled_with_cuda(), +# "core is not compiled with CUDA") +# class TestFP16_2(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = {'Input': self.input} +# self.outputs = {'Out': self.out} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.dtype = "float16" +# self.input = np.random.random([3, 4, 10]).astype(self.dtype) +# self.starts = [0] +# self.ends = [1] +# self.axes = [1] +# self.out = self.input[:, 0:1, :] +# self.infer_flags = [1] + +# def test_check_output(self): +# place = core.CUDAPlace(0) +# if core.is_float16_supported(place): +# self.check_output_with_place(place, atol=1e-5) + +# def test_check_grad_normal(self): +# place = core.CUDAPlace(0) +# if core.is_float16_supported(place): +# self.check_grad_with_place( +# place, ['Input'], +# 'Out', +# max_relative_error=0.006, +# numeric_grad_delta=0.5) + +# class TestBF16(OpTest): +# def setUp(self): +# self.op_type = "slice" +# self.config() +# self.inputs = {'Input': convert_float_to_uint16(self.input)} +# self.outputs = {'Out': convert_float_to_uint16(self.out)} +# self.attrs = { +# 'axes': self.axes, +# 'starts': self.starts, +# 'ends': self.ends, +# 'infer_flags': self.infer_flags +# } + +# def config(self): +# self.dtype = np.uint16 +# self.input = np.random.random([3, 4, 5, 6]).astype(np.float32) +# self.starts = [-3, 0, 2] +# self.ends = [3, 100, -1] +# self.axes = [0, 1, 3] +# self.out = self.input[-3:3, 0:100, :, 2:-1] +# self.infer_flags = [1, 1, 1] + +# def test_check_output(self): +# self.check_output() + +# def test_check_grad_normal(self): +# self.check_grad(['Input'], 'Out') + +# # Test python API +# class TestSliceAPI(unittest.TestCase): +# def test_1(self): +# input = np.random.random([3, 4, 5, 6]).astype("float64") +# minus_1 = fluid.layers.fill_constant([1], "int32", -1) +# minus_3 = fluid.layers.fill_constant([1], "int64", -3) +# starts = fluid.layers.data( +# name='starts', shape=[1, 3], append_batch_size=False) +# ends = fluid.layers.data( +# name='ends', shape=[3], append_batch_size=False) + +# x = fluid.layers.data( +# name="x", +# shape=[3, 4, 5, 6], +# append_batch_size=False, +# dtype="float64") + +# # value_int64 is greater than 2147483647 which is the max of int32 +# value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648) + +# out_1 = fluid.layers.slice( +# x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]) +# out_2 = fluid.layers.slice( +# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]) +# out_3 = fluid.layers.slice( +# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]) +# out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) + +# out_5 = x[-3:3, 0:100, 2:-1] +# out_6 = x[minus_3:3, 0:100, :, 2:-1] +# out_7 = x[minus_1, 0:100, :, 2:minus_1] + +# exe = fluid.Executor(place=fluid.CPUPlace()) +# res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( +# fluid.default_main_program(), +# feed={ +# "x": input, +# 'starts': np.array([-3, 0, 2]).astype("int32"), +# 'ends': np.array([3, 100, -1]).astype("int32") +# }, +# fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + +# assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) +# assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) +# assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) +# assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) +# assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) +# assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) +# assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) + +# class TestSliceApiWithTensor(unittest.TestCase): +# def test_starts_ends_is_tensor(self): +# with paddle.fluid.dygraph.guard(): +# a = paddle.rand(shape=[4, 5, 6], dtype='float32') +# axes = [0, 1, 2] +# starts = [-3, 0, 2] +# ends = [3, 2, 4] +# a_1 = paddle.slice( +# a, +# axes=axes, +# starts=paddle.to_tensor( +# starts, dtype='int32'), +# ends=paddle.to_tensor( +# ends, dtype='int32')) +# a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) + +# self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) + +# def test_bool_tensor(self): +# with paddle.fluid.dygraph.guard(): +# array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') +# tt = paddle.to_tensor(array) +# tt.stop_gradient = False + +# starts = [0, 1, 2] +# ends = [3, 5, 4] +# axes = [0, 1, 2] + +# y_paddle = paddle.slice(tt, axes, starts, ends) +# y_np = tt[0:3, 1:5, 2:4] + +# self.assertTrue(paddle.bool == y_paddle.dtype) +# self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) + +# class TestSliceApiWithLoDTensorArray(unittest.TestCase): +# def setUp(self): +# self.shape = (3, 4) +# self.data = np.random.random(size=self.shape).astype('float32') +# self.idx = 0 +# self.start = 0 +# self.end = 2 +# self.axis = 1 + +# self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda( +# ) else fluid.CPUPlace() +# self.exe = fluid.Executor(self.place) + +# def set_program_and_run(self, main_program, case_num): +# with fluid.program_guard(main_program): +# x = [ +# fluid.data( +# name='x0', shape=self.shape, dtype="float32"), fluid.data( +# name='x1', shape=self.shape, dtype="float32"), +# fluid.data( +# name='x2', shape=self.shape, dtype="float32") +# ] + +# for each_x in x: +# each_x.stop_gradient = False + +# arr = layers.create_array(dtype="float32") +# for i in range(3): +# idx = layers.array_length(arr) +# arr = layers.array_write(x=x[i], i=idx, array=arr) + +# if case_num == 1: +# self.sliced_arr = output = arr[0] + +# elif case_num == 2: +# end = fluid.layers.array_length( +# arr) - 1 # dtype of end is int64 +# self.sliced_arr = slice_arr = arr[self.start:end] +# output, _ = fluid.layers.tensor_array_to_tensor( +# slice_arr, axis=self.axis, use_stack=True) +# elif case_num == 3: +# value_int64 = fluid.layers.fill_constant([1], "int64", +# 2147483648) +# self.sliced_arr = slice_arr = arr[self.start:value_int64] +# output, _ = fluid.layers.tensor_array_to_tensor( +# slice_arr, axis=self.axis, use_stack=True) + +# loss = fluid.layers.reduce_sum(output) +# fluid.backward.append_backward(loss) +# g_vars = list( +# map(main_program.global_block().var, +# [each_x.name + "@GRAD" for each_x in x])) +# self.out, self.g_x0, self.g_x1, self.g_x2 = \ +# self.exe.run(main_program, +# feed = {'x0': self.data, +# 'x1': self.data, +# 'x2': self.data}, +# fetch_list=[output] + g_vars) + +# def test_case_1(self): +# main_program = fluid.Program() +# self.set_program_and_run(main_program, 1) + +# self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) +# self.assertEqual(self.sliced_arr.shape, self.shape) +# self.assertTrue(np.array_equal(self.out, self.data)) +# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + +# def test_case_2(self): +# main_program = fluid.Program() +# self.set_program_and_run(main_program, 2) + +# self.assertTrue( +# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) +# self.assertEqual(self.sliced_arr.shape, self.shape) +# self.assertTrue( +# np.array_equal( +# self.out, np.stack( +# [self.data, self.data], axis=self.axis))) +# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + +# def test_case_3(self): +# main_program = fluid.Program() +# self.set_program_and_run(main_program, 3) + +# self.assertTrue( +# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) +# self.assertEqual(self.sliced_arr.shape, self.shape) +# self.assertTrue( +# np.array_equal( +# self.out, +# np.stack( +# [self.data, self.data, self.data], axis=self.axis))) +# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) +# self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) + +# class TestImperativeVarBaseGetItem(unittest.TestCase): +# def test_getitem_with_long(self): +# with fluid.dygraph.guard(): +# data = np.random.random((2, 80, 16128)).astype('float32') +# var = fluid.dygraph.to_variable(data) +# sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here +# self.assertEqual(sliced.shape, [2, 70, 80]) + +# sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] +# self.assertEqual(sliced.shape, [2, 78, 78]) + +# def test_getitem_with_float(self): +# def test_float_in_slice_item(): +# with fluid.dygraph.guard(): +# data = np.random.random((2, 80, 16128)).astype('float32') +# var = fluid.dygraph.to_variable(data) +# sliced = var[:, 1.1:, :var.shape[1]] + +# self.assertRaises(Exception, test_float_in_slice_item) + +# def test_float_in_index(): +# with fluid.dygraph.guard(): +# data = np.random.random((2, 80, 16128)).astype('float32') +# var = fluid.dygraph.to_variable(data) +# sliced = var[1.1] + +# self.assertRaises(Exception, test_float_in_index) + +# class TestInferShape(unittest.TestCase): +# def test(self): +# x = paddle.ones(shape=[3, 4, 5]) +# x.desc.set_shape([3, -1, 5]) +# self.assertEqual(x.shape, (3, -1, 5)) + +# out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) +# self.assertEqual(out0.shape, (3, 3, 5)) + +# def test_axis_less_than_zero(self): + +# # Using paddle.disable_static will make other unittests fail. +# with fluid.dygraph.guard(): +# x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) +# x = paddle.to_tensor(x_arr) + +# pp_slice = paddle.slice(x, [100, ], [0], [1]) +# np_slice = x_arr[:, :, 0:1] +# self.assertTrue(np.array_equal(pp_slice, np_slice)) + +# pp_slice = paddle.slice(x, (-100, ), [0], [1]) +# np_slice = x_arr[0:1] +# self.assertTrue(np.array_equal(pp_slice, np_slice)) + +# x_arr = np.array([], dtype=np.float32) +# x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) + +# starts = paddle.to_tensor( +# np.reshape( +# np.array( +# [], dtype=np.int32), (0, ))) +# ends = paddle.to_tensor( +# np.reshape( +# np.array( +# [], dtype=np.int32), (0, ))) + +# with self.assertRaises(ValueError): +# paddle.slice(x, [-1000000], starts, ends) + +# with self.assertRaises(ValueError): +# paddle.slice(x, [1000000], starts, ends) + +# with self.assertRaises(ValueError): +# paddle.slice(x, [], starts, ends) + +# with self.assertRaises(ValueError): +# paddle.slice(x, 0, starts, ends) + +# @unittest.skipIf(not core.is_compiled_with_cuda(), +# "core is not compiled with CUDA") +# class TestImperativeCUDAPinnedInput(unittest.TestCase): +# def test_input_cuda_pinned_var(self): +# with fluid.dygraph.guard(): +# data = np.random.random((2, 80, 16128)).astype('float32') +# var = core.VarBase( +# value=data, +# name='', +# persistable=False, +# place=fluid.CUDAPinnedPlace(), +# zero_copy=False) +# sliced = var[:, 10:, :var.shape[1]] +# self.assertEqual(sliced.shape, [2, 70, 80]) if __name__ == '__main__': + paddle.enable_static() unittest.main()