From 586671ea41710dd5d7595f760d3929823da662ab Mon Sep 17 00:00:00 2001 From: phlrain Date: Fri, 11 Mar 2022 09:17:51 +0000 Subject: [PATCH] fix error --- paddle/fluid/operators/slice_op.h | 163 +- .../phi/kernels/gpu/slice_grad_kernel.cu.cc | 3 +- .../phi/kernels/impl/slice_grad_kernel_impl.h | 37 +- paddle/phi/kernels/impl/slice_kernel_impl.h | 7 +- paddle/phi/kernels/slice_grad_kernel.h | 6 +- paddle/phi/kernels/slice_kernel.h | 5 +- paddle/phi/ops/compat/slice_sig.cc | 156 +- paddle/pten/kernels/slice_kernel.h | 5 +- .../fluid/tests/unittests/test_slice_op.py | 1455 +++++++++-------- 9 files changed, 1086 insertions(+), 751 deletions(-) diff --git a/paddle/fluid/operators/slice_op.h b/paddle/fluid/operators/slice_op.h index 59db3cff32b..82ea25a5823 100644 --- a/paddle/fluid/operators/slice_op.h +++ b/paddle/fluid/operators/slice_op.h @@ -28,10 +28,103 @@ using Variable = framework::Variable; using LoDTensorArray = framework::LoDTensorArray; using DDim = framework::DDim; +inline void DealTensorArray(const framework::ExecutionContext& ctx, + const std::vector& starts, + const std::vector& ends, + bool out_is_array) { + auto in_array = ctx.Input("Input"); + // If the input is LoDTensorArray, the rank of input is 1. + int64_t in_size = in_array->size(); + int64_t start = starts[0] < 0 ? (starts[0] + in_size) : starts[0]; + int64_t end = ends[0] < 0 ? (ends[0] + in_size) : ends[0]; + + start = std::max(start, static_cast(0)); + end = std::max(end, static_cast(0)); + end = std::min(end, in_size); + + if (starts[0] == -1 && end == 0) { + end = start + 1; + } + + PADDLE_ENFORCE_GT(end, start, + platform::errors::InvalidArgument( + "Attr(ends) should be greater than attr(starts) in " + "slice op. But received end = %d, start = %d.", + ends[0], starts[0])); + int64_t out_size = end - start; + + if (out_is_array) { + auto out_array = ctx.Output("Out"); + out_array->resize(out_size); + + for (int i = 0; i < out_size; ++i) { + auto* out_tensor = &out_array->at(i); + auto in_tensor = in_array->at(i + start); + out_tensor->set_lod(in_tensor.lod()); + if (in_tensor.memory_size() > 0) { + paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out_tensor); + } else { + VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so " + "nothing has been written to output array[" + << i << "]."; + } + } + } else { + auto out = ctx.Output("Out"); + auto in_tensor = in_array->at(start); + paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out); + } +} + template class SliceKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override {} + void Compute(const framework::ExecutionContext& ctx) const override { + const Variable* input_var = ctx.InputVar("Input"); + Variable* out_var = ctx.OutputVar("Out"); + bool input_is_array = input_var->IsType(); + bool out_is_array = out_var->IsType(); + + auto axes_int = ctx.Attr>("axes"); + auto starts_int = ctx.Attr>("starts"); + auto ends_int = ctx.Attr>("ends"); + std::vector axes(axes_int.begin(), axes_int.end()); + std::vector starts(starts_int.begin(), starts_int.end()); + std::vector ends(ends_int.begin(), ends_int.end()); + + auto decrease_axis = ctx.Attr>("decrease_axis"); + auto infer_flags = ctx.Attr>("infer_flags"); + + // Step 1: Get the accurate attribute value of starts and ends + auto starts_tensor_list = ctx.MultiInput("StartsTensorList"); + if (ctx.HasInput("StartsTensor")) { + starts = GetDataFromTensor(ctx.Input("StartsTensor")); + } else if (starts_tensor_list.size() > 0) { + starts 
= GetDataFromTensorList(starts_tensor_list); + } + + auto ends_tensor_list = ctx.MultiInput("EndsTensorList"); + if (ctx.HasInput("EndsTensor")) { + ends = GetDataFromTensor(ctx.Input("EndsTensor")); + } else if (ends_tensor_list.size() > 0) { + ends = GetDataFromTensorList(ends_tensor_list); + } + + PADDLE_ENFORCE_EQ( + starts.size(), axes.size(), + platform::errors::InvalidArgument( + "The size of starts must be equal to the size of axes.")); + PADDLE_ENFORCE_EQ( + ends.size(), axes.size(), + platform::errors::InvalidArgument( + "The size of ends must be equal to the size of axes.")); + + // Step 2: Compute output + if (input_is_array) { + DealTensorArray(ctx, starts, ends, out_is_array); + return; + } + } private: }; @@ -39,7 +132,73 @@ class SliceKernel : public framework::OpKernel { template class SliceGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override {} + void Compute(const framework::ExecutionContext& ctx) const override { + auto axes = ctx.Attr>("axes"); + auto starts_int = ctx.Attr>("starts"); + auto ends_int = ctx.Attr>("ends"); + std::vector starts(starts_int.begin(), starts_int.end()); + std::vector ends(ends_int.begin(), ends_int.end()); + + // Get the accurate attribute value of starts and ends + auto starts_tensor_list = ctx.MultiInput("StartsTensorList"); + if (ctx.HasInput("StartsTensor")) { + starts = GetDataFromTensor(ctx.Input("StartsTensor")); + } else if (starts_tensor_list.size() > 0) { + starts = GetDataFromTensorList(starts_tensor_list); + } + + auto ends_tensor_list = ctx.MultiInput("EndsTensorList"); + if (ctx.HasInput("EndsTensor")) { + ends = GetDataFromTensor(ctx.Input("EndsTensor")); + } else if (ends_tensor_list.size() > 0) { + ends = GetDataFromTensorList(ends_tensor_list); + } + + Variable* d_input_var = ctx.OutputVar(framework::GradVarName("Input")); + const Variable* d_out_var = ctx.InputVar(framework::GradVarName("Out")); + bool d_input_is_array = d_input_var->IsType(); + bool d_out_is_array = d_out_var->IsType(); + + if (d_input_is_array) { + auto* input_array = ctx.Input("Input"); + auto* d_in_arr = + ctx.Output(framework::GradVarName("Input")); + + int64_t d_in_size = input_array->size(); + d_in_arr->resize(d_in_size); + // If the input is LoDTensorArray, the rank of input is 1. + // So only use the 0th element of starts. + int64_t start = starts[0] < 0 ? 
(starts[0] + d_in_size) : starts[0]; + start = std::max(start, static_cast(0)); + // set zero + platform::DeviceContextPool& pool = + platform::DeviceContextPool::Instance(); + auto& dev_ctx = *pool.Get(ctx.GetPlace()); + phi::funcs::SetConstant functor; + for (int i = 0; i < d_in_size; ++i) { + auto dim = input_array->at(i).dims(); + d_in_arr->at(i).Resize(dim); + d_in_arr->at(i).mutable_data(ctx.GetPlace()); + functor(reinterpret_cast(dev_ctx), + &d_in_arr->at(i), static_cast(0)); + } + + if (d_out_is_array) { + auto* d_out_arr = + ctx.Input(framework::GradVarName("Out")); + int d_out_size = d_out_arr->size(); + for (int i = 0; i < d_out_size; ++i) { + paddle::framework::TensorCopy(d_out_arr->at(i), ctx.GetPlace(), + &(d_in_arr->at(start + i))); + } + } else { + auto* d_out = ctx.Input(framework::GradVarName("Out")); + paddle::framework::TensorCopy(*d_out, ctx.GetPlace(), + &(d_in_arr->at(start))); + } + return; + } + } private: }; diff --git a/paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc b/paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc index 8a0dc895db1..2769f5cc65d 100644 --- a/paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc +++ b/paddle/phi/kernels/gpu/slice_grad_kernel.cu.cc @@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice_grad, double, phi::dtype::complex, phi::dtype::complex, - phi::dtype::bfloat16) {} + phi::dtype::bfloat16, + phi::dtype::float16) {} diff --git a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h index bc4246a3849..a509d4d44e2 100644 --- a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h @@ -30,6 +30,8 @@ void LaunchEigenPadding( const DDim& out_dims, const Eigen::array, D>& paddings) { auto& place = *context.template eigen_device(); + LOG(ERROR) << D << "\t" << in_dims; + LOG(ERROR) << out_dims; auto d_in_t = EigenTensor::From( *d_input, in_dims); auto d_out_t = EigenTensor::From( @@ -150,12 +152,12 @@ void EigenPaddingCompute( // the second dimension do not need padding, set padding[1] zero reshaped_padding[1].first = reshaped_padding[1].second = 0; - LaunchEigenPadding(context, - d_input, - reshaped_in_dims, - d_out, - reshaped_out_dims, - reshaped_padding); + LaunchEigenPadding(context, + d_input, + reshaped_in_dims, + d_out, + reshaped_out_dims, + reshaped_padding); } else { // other dimension need padding // reshape the dimension of tensor in 3: @@ -190,12 +192,13 @@ void EigenPaddingCompute( // the third dimension do not need padding, set padding[2] zero reshaped_padding[2].first = reshaped_padding[2].second = 0; - LaunchEigenPadding(context, - d_input, - reshaped_in_dims, - d_out, - reshaped_out_dims, - reshaped_padding); + LOG(ERROR) << "run here"; + LaunchEigenPadding(context, + d_input, + reshaped_in_dims, + d_out, + reshaped_out_dims, + reshaped_padding); } } else { // need padding at many dimension, cannot reduce dimension @@ -270,14 +273,18 @@ void SliceGradCompute(const Context& ctx, template void SliceGradRawKernel(const Context& ctx, + const DenseTensor& input, const DenseTensor& out_grad, const std::vector& axes, - const std::vector& starts, - const std::vector& ends, + const ScalarArray& starts_arr, + const ScalarArray& ends_arr, const std::vector& infer_flags, const std::vector& decrease_axis, DenseTensor* input_grad) { - size_t rank = out_grad.dims().size(); + size_t rank = input.dims().size(); + + auto& starts = starts_arr.GetData(); + auto& ends = ends_arr.GetData(); switch (rank) { case 1: diff --git 
a/paddle/phi/kernels/impl/slice_kernel_impl.h b/paddle/phi/kernels/impl/slice_kernel_impl.h index 5ee138eee67..2d8ce8abd02 100644 --- a/paddle/phi/kernels/impl/slice_kernel_impl.h +++ b/paddle/phi/kernels/impl/slice_kernel_impl.h @@ -110,13 +110,16 @@ template void SliceRawKernel(const Context& ctx, const DenseTensor& input, const std::vector& axes, - const std::vector& starts, - const std::vector& ends, + const ScalarArray& starts_arr, + const ScalarArray& ends_arr, const std::vector& infer_flags, const std::vector& decrease_axis, DenseTensor* out) { int rank = input.dims().size(); + auto& starts = starts_arr.GetData(); + auto& ends = ends_arr.GetData(); + switch (rank) { case 1: SliceCompute( diff --git a/paddle/phi/kernels/slice_grad_kernel.h b/paddle/phi/kernels/slice_grad_kernel.h index c76c9dede5f..a7ee9ffde4e 100644 --- a/paddle/phi/kernels/slice_grad_kernel.h +++ b/paddle/phi/kernels/slice_grad_kernel.h @@ -14,16 +14,18 @@ #pragma once +#include "paddle/phi/common/scalar_array.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { template void SliceGradRawKernel(const Context& ctx, + const DenseTensor& input, const DenseTensor& out_grad, const std::vector& axes, - const std::vector& starts, - const std::vector& ends, + const ScalarArray& starts, + const ScalarArray& ends, const std::vector& infer_flags, const std::vector& decrease_axis, DenseTensor* input_grad); diff --git a/paddle/phi/kernels/slice_kernel.h b/paddle/phi/kernels/slice_kernel.h index 2bbe590ce65..ff27824b9e6 100644 --- a/paddle/phi/kernels/slice_kernel.h +++ b/paddle/phi/kernels/slice_kernel.h @@ -14,6 +14,7 @@ #pragma once +#include "paddle/phi/common/scalar_array.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { @@ -22,8 +23,8 @@ template void SliceRawKernel(const Context& ctx, const DenseTensor& input, const std::vector& axes, - const std::vector& starts, - const std::vector& ends, + const ScalarArray& starts, + const ScalarArray& ends, const std::vector& infer_flags, const std::vector& decrease_axis, DenseTensor* out); diff --git a/paddle/phi/ops/compat/slice_sig.cc b/paddle/phi/ops/compat/slice_sig.cc index 5048ba260ab..b51b7602939 100644 --- a/paddle/phi/ops/compat/slice_sig.cc +++ b/paddle/phi/ops/compat/slice_sig.cc @@ -17,19 +17,155 @@ namespace phi { KernelSignature SliceOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "slice", - {"Input"}, - {"axes", "starts", "ends", "infer_flags", "decrease_axis"}, - {"Out"}); + if (ctx.HasInput("StartsTensor")) { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature("slice", + {"Input"}, + {"axes", + "StartsTensor", + "EndsTensor", + "infer_flags", + "decrease_axis"}, + {"Out"}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature("slice", + {"Input"}, + {"axes", + "StartsTensor", + "EndsTensorList", + "infer_flags", + "decrease_axis"}, + {"Out"}); + } else { + return KernelSignature( + "slice", + {"Input"}, + {"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"}, + {"Out"}); + } + } else if (ctx.InputSize("StartsTensorList") > 0) { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature("slice", + {"Input"}, + {"axes", + "StartsTensorList", + "EndsTensor", + "infer_flags", + "decrease_axis"}, + {"Out"}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature("slice", + {"Input"}, + {"axes", + "StartsTensorList", + "EndsTensorList", + "infer_flags", + "decrease_axis"}, + {"Out"}); + } else { + return KernelSignature( + "slice", + {"Input"}, + 
{"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"}, + {"Out"}); + } + } else { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature( + "slice", + {"Input"}, + {"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"}, + {"Out"}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature( + "slice", + {"Input"}, + {"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"}, + {"Out"}); + } else { + return KernelSignature( + "slice", + {"Input"}, + {"axes", "starts", "ends", "infer_flags", "decrease_axis"}, + {"Out"}); + } + } } KernelSignature SliceGradOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "slice_grad", - {GradVarName("Out")}, - {"axes", "starts", "ends", "infer_flags", "decrease_axis"}, - {GradVarName("Input")}); + if (ctx.HasInput("StartsTensor")) { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature("slice_grad", + {"Input", GradVarName("Out")}, + {"axes", + "StartsTensor", + "EndsTensor", + "infer_flags", + "decrease_axis"}, + {GradVarName("Input")}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature("slice_grad", + {"Input", GradVarName("Out")}, + {"axes", + "StartsTensor", + "EndsTensorList", + "infer_flags", + "decrease_axis"}, + {GradVarName("Input")}); + } else { + return KernelSignature( + "slice_grad", + {"Input", GradVarName("Out")}, + {"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"}, + {GradVarName("Input")}); + } + } else if (ctx.InputSize("StartsTensorList") > 0) { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature("slice_grad", + {"Input", GradVarName("Out")}, + {"axes", + "StartsTensorList", + "EndsTensor", + "infer_flags", + "decrease_axis"}, + {GradVarName("Input")}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature("slice_grad", + {"Input", GradVarName("Out")}, + {"axes", + "StartsTensorList", + "EndsTensorList", + "infer_flags", + "decrease_axis"}, + {GradVarName("Input")}); + } else { + return KernelSignature( + "slice_grad", + {"Input", GradVarName("Out")}, + {"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"}, + {GradVarName("Input")}); + } + } else { + if (ctx.HasInput("EndsTensor")) { + return KernelSignature( + "slice_grad", + {"Input", GradVarName("Out")}, + {"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"}, + {GradVarName("Input")}); + } else if (ctx.InputSize("EndsTensorList") > 0) { + return KernelSignature( + "slice_grad", + {"Input", GradVarName("Out")}, + {"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"}, + {GradVarName("Input")}); + } else { + return KernelSignature( + "slice_grad", + {"Input", GradVarName("Out")}, + {"axes", "starts", "ends", "infer_flags", "decrease_axis"}, + {GradVarName("Input")}); + } + } } } // namespace phi diff --git a/paddle/pten/kernels/slice_kernel.h b/paddle/pten/kernels/slice_kernel.h index 2bbe590ce65..ff27824b9e6 100644 --- a/paddle/pten/kernels/slice_kernel.h +++ b/paddle/pten/kernels/slice_kernel.h @@ -14,6 +14,7 @@ #pragma once +#include "paddle/phi/common/scalar_array.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { @@ -22,8 +23,8 @@ template void SliceRawKernel(const Context& ctx, const DenseTensor& input, const std::vector& axes, - const std::vector& starts, - const std::vector& ends, + const ScalarArray& starts, + const ScalarArray& ends, const std::vector& infer_flags, const std::vector& decrease_axis, DenseTensor* out); diff --git 
a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index 7bc107205da..f1939176177 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -55,721 +55,746 @@ class TestSliceOp(OpTest): self.check_grad(['Input'], 'Out', max_relative_error=0.006) -# class TestCase1(TestSliceOp): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [-3, 0, 2] -# self.ends = [3, 100, -1] -# self.axes = [0, 1, 2] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[-3:3, 0:100, 2:-1, :] - -# class TestCase2(TestSliceOp): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [-3, 0, 2] -# self.ends = [3, 100, -1] -# self.axes = [0, 1, 3] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[-3:3, 0:100, :, 2:-1] - -# # 1.2 with attr(decrease) -# class TestSliceOp_decs_dim(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = {'Input': self.input} -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags, -# 'decrease_axis': self.decrease_axis, -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [2, 3, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[1, 0:3, 2:4, :] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [2, 1, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0, 1] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[1, 0, 2:4, :] - -# class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [-1, 0, 2] -# self.ends = [1000000, 1, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0, 1] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[-1, 0, 2:4, :] - -# class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): -# def config(self): -# self.input = np.random.random([3, 4, 5, 7]).astype("float64") -# self.starts = [0, 1, 2, 3] -# self.ends = [1, 2, 3, 4] -# self.axes = [0, 1, 2, 3] -# self.decrease_axis = [0, 1, 2, 3] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[0, 1, 2, 3:4] - -# class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [-1] -# self.ends = [1000000] -# self.axes = [3] -# self.decrease_axis = [3] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[:, :, :, -1] - -# class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [0, 1, 2, 3] -# self.ends = [1, 2, 3, 4] -# self.axes = [0, 1, 2, 3] -# self.decrease_axis = [0, 1, 2, 3] -# self.infer_flags = [1, 1, 1] -# self.out = self.input[0, 1, 2, 3:4] - -# # Situation 2: starts(list, have tensor), ends(list, no tensor) -# # without attr(decrease) -# class TestSliceOp_starts_ListTensor(OpTest): -# def setUp(self): -# self.op_type = 
"slice" -# self.config() - -# starts_tensor = [] -# for index, ele in enumerate(self.starts): -# starts_tensor.append(("x" + str(index), np.ones( -# (1)).astype('int64') * ele)) - -# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts_infer, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [3, 3, 4] -# self.axes = [0, 1, 2] -# self.infer_flags = [-1, 1, -1] -# self.out = self.input[1:3, 0:3, 2:4, :] - -# self.starts_infer = [-1, 0, -1] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# # Situation 2: starts(list, have tensor), ends(list, no tensor) -# # with attr(decrease) -# class TestSliceOp_decs_dim_starts_ListTensor(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() - -# starts_tensor = [] -# for index, ele in enumerate(self.starts): -# starts_tensor.append(("x" + str(index), np.ones( -# (1)).astype('int32') * ele)) - -# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} - -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts_infer, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags, -# 'decrease_axis': self.decrease_axis, -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [2, 3, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0] -# self.infer_flags = [1, -1, 1] -# self.out = self.input[1, 0:3, 2:4, :] - -# self.starts_infer = [1, -1, 2] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# class TestSliceOp_decs_dim_5_starts_ListTensor( -# TestSliceOp_decs_dim_starts_ListTensor): -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [-1] -# self.ends = [1000000] -# self.axes = [3] -# self.decrease_axis = [3] -# self.infer_flags = [-1] -# self.out = self.input[:, :, :, -1] - -# self.starts_infer = [-1] - -# # Situation 3: starts(tensor), ends(list, no tensor) -# # with attr(decrease) -# class TestSliceOp_decs_dim_starts_OneTensor(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = { -# 'Input': self.input, -# "StartsTensor": np.array( -# self.starts, dtype="int32") -# } -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# #'starts': self.starts, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags, -# 'decrease_axis': self.decrease_axis, -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [2, 3, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0] -# self.infer_flags = [-1, -1, -1] -# self.out = self.input[1, 0:3, 2:4, :] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# # Situation 4: starts(tensor), ends(tensor) -# # without attr(decrease) -# class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() - -# self.inputs = { -# 'Input': self.input, -# 
"StartsTensor": np.array( -# self.starts, dtype="int64"), -# "EndsTensor": np.array( -# self.ends, dtype="int32") -# } -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# #'starts': self.starts, -# #'ends': self.ends_infer, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [3, 3, 4] -# self.axes = [0, 1, 2] -# self.infer_flags = [-1, -1, -1] -# self.out = self.input[1:3, 0:3, 2:4, :] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# # Situation 5: starts(tensor), ends(tensor) -# # with attr(decrease) -# class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = { -# 'Input': self.input, -# "StartsTensor": np.array( -# self.starts, dtype="int32"), -# "EndsTensor": np.array( -# self.ends, dtype="int32") -# } -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# #'starts': self.starts, -# #'ends': self.ends, -# 'infer_flags': self.infer_flags, -# 'decrease_axis': self.decrease_axis, -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [2, 1, 4] -# self.axes = [0, 1, 2] -# self.decrease_axis = [0, 1] -# self.infer_flags = [-1, -1, -1] -# self.out = self.input[1, 0, 2:4, :] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# # Situation 6: starts(tensor), ends(list, have tensor) -# # without attr(decrease) -# class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() - -# ends_tensor = [] -# for index, ele in enumerate(self.ends): -# ends_tensor.append(("y" + str(index), np.ones( -# (1)).astype('int32') * ele)) - -# self.inputs = { -# 'Input': self.input, -# "StartsTensor": np.array( -# self.starts, dtype="int32"), -# 'EndsTensorList': ends_tensor -# } -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# #'starts': self.starts, -# 'ends': self.ends_infer, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.input = np.random.random([3, 4, 5, 6]).astype("float64") -# self.starts = [1, 0, 2] -# self.ends = [3, 3, 4] -# self.axes = [0, 1, 2] -# self.infer_flags = [-1, -1, -1] -# self.out = self.input[1:3, 0:3, 2:4, :] - -# self.ends_infer = [-1, 3, 4] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out', max_relative_error=0.006) - -# # Test CUDA float16 -# @unittest.skipIf(not core.is_compiled_with_cuda(), -# "core is not compiled with CUDA") -# class TestFP16(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = {'Input': self.input} -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.dtype = "float16" -# self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) -# self.starts = [-3, 0, 2] -# self.ends = [3, 100, -1] -# self.axes = [0, 1, 3] -# self.out = self.input[-3:3, 0:100, :, 2:-1] -# self.infer_flags = [1, 1, 1] - -# def test_check_output(self): -# place = core.CUDAPlace(0) 
-# if core.is_float16_supported(place): -# self.check_output_with_place(place, atol=1e-5) - -# def test_check_grad_normal(self): -# place = core.CUDAPlace(0) -# if core.is_float16_supported(place): -# self.check_grad_with_place( -# place, ['Input'], 'Out', max_relative_error=0.006) - -# @unittest.skipIf(not core.is_compiled_with_cuda(), -# "core is not compiled with CUDA") -# class TestFP16_2(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = {'Input': self.input} -# self.outputs = {'Out': self.out} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.dtype = "float16" -# self.input = np.random.random([3, 4, 10]).astype(self.dtype) -# self.starts = [0] -# self.ends = [1] -# self.axes = [1] -# self.out = self.input[:, 0:1, :] -# self.infer_flags = [1] - -# def test_check_output(self): -# place = core.CUDAPlace(0) -# if core.is_float16_supported(place): -# self.check_output_with_place(place, atol=1e-5) - -# def test_check_grad_normal(self): -# place = core.CUDAPlace(0) -# if core.is_float16_supported(place): -# self.check_grad_with_place( -# place, ['Input'], -# 'Out', -# max_relative_error=0.006, -# numeric_grad_delta=0.5) - -# class TestBF16(OpTest): -# def setUp(self): -# self.op_type = "slice" -# self.config() -# self.inputs = {'Input': convert_float_to_uint16(self.input)} -# self.outputs = {'Out': convert_float_to_uint16(self.out)} -# self.attrs = { -# 'axes': self.axes, -# 'starts': self.starts, -# 'ends': self.ends, -# 'infer_flags': self.infer_flags -# } - -# def config(self): -# self.dtype = np.uint16 -# self.input = np.random.random([3, 4, 5, 6]).astype(np.float32) -# self.starts = [-3, 0, 2] -# self.ends = [3, 100, -1] -# self.axes = [0, 1, 3] -# self.out = self.input[-3:3, 0:100, :, 2:-1] -# self.infer_flags = [1, 1, 1] - -# def test_check_output(self): -# self.check_output() - -# def test_check_grad_normal(self): -# self.check_grad(['Input'], 'Out') - -# # Test python API -# class TestSliceAPI(unittest.TestCase): -# def test_1(self): -# input = np.random.random([3, 4, 5, 6]).astype("float64") -# minus_1 = fluid.layers.fill_constant([1], "int32", -1) -# minus_3 = fluid.layers.fill_constant([1], "int64", -3) -# starts = fluid.layers.data( -# name='starts', shape=[1, 3], append_batch_size=False) -# ends = fluid.layers.data( -# name='ends', shape=[3], append_batch_size=False) - -# x = fluid.layers.data( -# name="x", -# shape=[3, 4, 5, 6], -# append_batch_size=False, -# dtype="float64") - -# # value_int64 is greater than 2147483647 which is the max of int32 -# value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648) - -# out_1 = fluid.layers.slice( -# x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]) -# out_2 = fluid.layers.slice( -# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]) -# out_3 = fluid.layers.slice( -# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]) -# out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) - -# out_5 = x[-3:3, 0:100, 2:-1] -# out_6 = x[minus_3:3, 0:100, :, 2:-1] -# out_7 = x[minus_1, 0:100, :, 2:minus_1] - -# exe = fluid.Executor(place=fluid.CPUPlace()) -# res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( -# fluid.default_main_program(), -# feed={ -# "x": input, -# 'starts': np.array([-3, 0, 2]).astype("int32"), -# 'ends': np.array([3, 100, -1]).astype("int32") -# }, -# fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, 
out_7]) - -# assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) -# assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) -# assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) -# assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) -# assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) -# assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) -# assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) - -# class TestSliceApiWithTensor(unittest.TestCase): -# def test_starts_ends_is_tensor(self): -# with paddle.fluid.dygraph.guard(): -# a = paddle.rand(shape=[4, 5, 6], dtype='float32') -# axes = [0, 1, 2] -# starts = [-3, 0, 2] -# ends = [3, 2, 4] -# a_1 = paddle.slice( -# a, -# axes=axes, -# starts=paddle.to_tensor( -# starts, dtype='int32'), -# ends=paddle.to_tensor( -# ends, dtype='int32')) -# a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) - -# self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) - -# def test_bool_tensor(self): -# with paddle.fluid.dygraph.guard(): -# array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') -# tt = paddle.to_tensor(array) -# tt.stop_gradient = False - -# starts = [0, 1, 2] -# ends = [3, 5, 4] -# axes = [0, 1, 2] - -# y_paddle = paddle.slice(tt, axes, starts, ends) -# y_np = tt[0:3, 1:5, 2:4] - -# self.assertTrue(paddle.bool == y_paddle.dtype) -# self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) - -# class TestSliceApiWithLoDTensorArray(unittest.TestCase): -# def setUp(self): -# self.shape = (3, 4) -# self.data = np.random.random(size=self.shape).astype('float32') -# self.idx = 0 -# self.start = 0 -# self.end = 2 -# self.axis = 1 - -# self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda( -# ) else fluid.CPUPlace() -# self.exe = fluid.Executor(self.place) - -# def set_program_and_run(self, main_program, case_num): -# with fluid.program_guard(main_program): -# x = [ -# fluid.data( -# name='x0', shape=self.shape, dtype="float32"), fluid.data( -# name='x1', shape=self.shape, dtype="float32"), -# fluid.data( -# name='x2', shape=self.shape, dtype="float32") -# ] - -# for each_x in x: -# each_x.stop_gradient = False - -# arr = layers.create_array(dtype="float32") -# for i in range(3): -# idx = layers.array_length(arr) -# arr = layers.array_write(x=x[i], i=idx, array=arr) - -# if case_num == 1: -# self.sliced_arr = output = arr[0] - -# elif case_num == 2: -# end = fluid.layers.array_length( -# arr) - 1 # dtype of end is int64 -# self.sliced_arr = slice_arr = arr[self.start:end] -# output, _ = fluid.layers.tensor_array_to_tensor( -# slice_arr, axis=self.axis, use_stack=True) -# elif case_num == 3: -# value_int64 = fluid.layers.fill_constant([1], "int64", -# 2147483648) -# self.sliced_arr = slice_arr = arr[self.start:value_int64] -# output, _ = fluid.layers.tensor_array_to_tensor( -# slice_arr, axis=self.axis, use_stack=True) - -# loss = fluid.layers.reduce_sum(output) -# fluid.backward.append_backward(loss) -# g_vars = list( -# map(main_program.global_block().var, -# [each_x.name + "@GRAD" for each_x in x])) -# self.out, self.g_x0, self.g_x1, self.g_x2 = \ -# self.exe.run(main_program, -# feed = {'x0': self.data, -# 'x1': self.data, -# 'x2': self.data}, -# fetch_list=[output] + g_vars) - -# def test_case_1(self): -# main_program = fluid.Program() -# self.set_program_and_run(main_program, 1) - -# self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) -# self.assertEqual(self.sliced_arr.shape, self.shape) -# self.assertTrue(np.array_equal(self.out, self.data)) -# 
self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) - -# def test_case_2(self): -# main_program = fluid.Program() -# self.set_program_and_run(main_program, 2) - -# self.assertTrue( -# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) -# self.assertEqual(self.sliced_arr.shape, self.shape) -# self.assertTrue( -# np.array_equal( -# self.out, np.stack( -# [self.data, self.data], axis=self.axis))) -# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) - -# def test_case_3(self): -# main_program = fluid.Program() -# self.set_program_and_run(main_program, 3) - -# self.assertTrue( -# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) -# self.assertEqual(self.sliced_arr.shape, self.shape) -# self.assertTrue( -# np.array_equal( -# self.out, -# np.stack( -# [self.data, self.data, self.data], axis=self.axis))) -# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) -# self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) - -# class TestImperativeVarBaseGetItem(unittest.TestCase): -# def test_getitem_with_long(self): -# with fluid.dygraph.guard(): -# data = np.random.random((2, 80, 16128)).astype('float32') -# var = fluid.dygraph.to_variable(data) -# sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here -# self.assertEqual(sliced.shape, [2, 70, 80]) - -# sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] -# self.assertEqual(sliced.shape, [2, 78, 78]) - -# def test_getitem_with_float(self): -# def test_float_in_slice_item(): -# with fluid.dygraph.guard(): -# data = np.random.random((2, 80, 16128)).astype('float32') -# var = fluid.dygraph.to_variable(data) -# sliced = var[:, 1.1:, :var.shape[1]] - -# self.assertRaises(Exception, test_float_in_slice_item) - -# def test_float_in_index(): -# with fluid.dygraph.guard(): -# data = np.random.random((2, 80, 16128)).astype('float32') -# var = fluid.dygraph.to_variable(data) -# sliced = var[1.1] - -# self.assertRaises(Exception, test_float_in_index) - -# class TestInferShape(unittest.TestCase): -# def test(self): -# x = paddle.ones(shape=[3, 4, 5]) -# x.desc.set_shape([3, -1, 5]) -# self.assertEqual(x.shape, (3, -1, 5)) - -# out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) -# self.assertEqual(out0.shape, (3, 3, 5)) - -# def test_axis_less_than_zero(self): - -# # Using paddle.disable_static will make other unittests fail. 
-# with fluid.dygraph.guard(): -# x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) -# x = paddle.to_tensor(x_arr) - -# pp_slice = paddle.slice(x, [100, ], [0], [1]) -# np_slice = x_arr[:, :, 0:1] -# self.assertTrue(np.array_equal(pp_slice, np_slice)) - -# pp_slice = paddle.slice(x, (-100, ), [0], [1]) -# np_slice = x_arr[0:1] -# self.assertTrue(np.array_equal(pp_slice, np_slice)) - -# x_arr = np.array([], dtype=np.float32) -# x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) - -# starts = paddle.to_tensor( -# np.reshape( -# np.array( -# [], dtype=np.int32), (0, ))) -# ends = paddle.to_tensor( -# np.reshape( -# np.array( -# [], dtype=np.int32), (0, ))) - -# with self.assertRaises(ValueError): -# paddle.slice(x, [-1000000], starts, ends) - -# with self.assertRaises(ValueError): -# paddle.slice(x, [1000000], starts, ends) - -# with self.assertRaises(ValueError): -# paddle.slice(x, [], starts, ends) - -# with self.assertRaises(ValueError): -# paddle.slice(x, 0, starts, ends) - -# @unittest.skipIf(not core.is_compiled_with_cuda(), -# "core is not compiled with CUDA") -# class TestImperativeCUDAPinnedInput(unittest.TestCase): -# def test_input_cuda_pinned_var(self): -# with fluid.dygraph.guard(): -# data = np.random.random((2, 80, 16128)).astype('float32') -# var = core.VarBase( -# value=data, -# name='', -# persistable=False, -# place=fluid.CUDAPinnedPlace(), -# zero_copy=False) -# sliced = var[:, 10:, :var.shape[1]] -# self.assertEqual(sliced.shape, [2, 70, 80]) +class TestCase1(TestSliceOp): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 2] + self.infer_flags = [1, 1, 1] + self.out = self.input[-3:3, 0:100, 2:-1, :] + + +class TestCase2(TestSliceOp): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 3] + self.infer_flags = [1, 1, 1] + self.out = self.input[-3:3, 0:100, :, 2:-1] + + +# 1.2 with attr(decrease) +class TestSliceOp_decs_dim(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = {'Input': self.input} + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts, + 'ends': self.ends, + 'infer_flags': self.infer_flags, + 'decrease_axis': self.decrease_axis, + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [2, 3, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0] + self.infer_flags = [1, 1, 1] + self.out = self.input[1, 0:3, 2:4, :] + + # def test_check_output(self): + # self.check_output() + + def test_check_grad_normal(self): + print(self.input.size) + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [2, 1, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0, 1] + self.infer_flags = [1, 1, 1] + self.out = self.input[1, 0, 2:4, :] + + +class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [-1, 0, 2] + self.ends = [1000000, 1, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0, 1] + self.infer_flags = [1, 1, 1] + self.out = self.input[-1, 0, 2:4, :] + + +class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): + def 
config(self): + self.input = np.random.random([3, 4, 5, 7]).astype("float64") + self.starts = [0, 1, 2, 3] + self.ends = [1, 2, 3, 4] + self.axes = [0, 1, 2, 3] + self.decrease_axis = [0, 1, 2, 3] + self.infer_flags = [1, 1, 1] + self.out = self.input[0, 1, 2, 3:4] + + +class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [-1] + self.ends = [1000000] + self.axes = [3] + self.decrease_axis = [3] + self.infer_flags = [1, 1, 1] + self.out = self.input[:, :, :, -1] + + +class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [0, 1, 2, 3] + self.ends = [1, 2, 3, 4] + self.axes = [0, 1, 2, 3] + self.decrease_axis = [0, 1, 2, 3] + self.infer_flags = [1, 1, 1] + self.out = self.input[0, 1, 2, 3:4] + + +# Situation 2: starts(list, have tensor), ends(list, no tensor) +# without attr(decrease) +class TestSliceOp_starts_ListTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + + starts_tensor = [] + for index, ele in enumerate(self.starts): + starts_tensor.append(("x" + str(index), np.ones( + (1)).astype('int64') * ele)) + + self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts_infer, + 'ends': self.ends, + 'infer_flags': self.infer_flags + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [3, 3, 4] + self.axes = [0, 1, 2] + self.infer_flags = [-1, 1, -1] + self.out = self.input[1:3, 0:3, 2:4, :] + + self.starts_infer = [-1, 0, -1] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +# Situation 2: starts(list, have tensor), ends(list, no tensor) +# with attr(decrease) +class TestSliceOp_decs_dim_starts_ListTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + + starts_tensor = [] + for index, ele in enumerate(self.starts): + starts_tensor.append(("x" + str(index), np.ones( + (1)).astype('int32') * ele)) + + self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} + + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts_infer, + 'ends': self.ends, + 'infer_flags': self.infer_flags, + 'decrease_axis': self.decrease_axis, + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [2, 3, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0] + self.infer_flags = [1, -1, 1] + self.out = self.input[1, 0:3, 2:4, :] + + self.starts_infer = [1, -1, 2] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +class TestSliceOp_decs_dim_5_starts_ListTensor( + TestSliceOp_decs_dim_starts_ListTensor): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [-1] + self.ends = [1000000] + self.axes = [3] + self.decrease_axis = [3] + self.infer_flags = [-1] + self.out = self.input[:, :, :, -1] + + self.starts_infer = [-1] + + +# Situation 3: starts(tensor), ends(list, no tensor) +# with attr(decrease) +class TestSliceOp_decs_dim_starts_OneTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + 
self.inputs = { + 'Input': self.input, + "StartsTensor": np.array( + self.starts, dtype="int32") + } + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + #'starts': self.starts, + 'ends': self.ends, + 'infer_flags': self.infer_flags, + 'decrease_axis': self.decrease_axis, + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [2, 3, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0] + self.infer_flags = [-1, -1, -1] + self.out = self.input[1, 0:3, 2:4, :] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +# Situation 4: starts(tensor), ends(tensor) +# without attr(decrease) +class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + + self.inputs = { + 'Input': self.input, + "StartsTensor": np.array( + self.starts, dtype="int64"), + "EndsTensor": np.array( + self.ends, dtype="int32") + } + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + #'starts': self.starts, + #'ends': self.ends_infer, + 'infer_flags': self.infer_flags + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [3, 3, 4] + self.axes = [0, 1, 2] + self.infer_flags = [-1, -1, -1] + self.out = self.input[1:3, 0:3, 2:4, :] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +# Situation 5: starts(tensor), ends(tensor) +# with attr(decrease) +class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = { + 'Input': self.input, + "StartsTensor": np.array( + self.starts, dtype="int32"), + "EndsTensor": np.array( + self.ends, dtype="int32") + } + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + #'starts': self.starts, + #'ends': self.ends, + 'infer_flags': self.infer_flags, + 'decrease_axis': self.decrease_axis, + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [2, 1, 4] + self.axes = [0, 1, 2] + self.decrease_axis = [0, 1] + self.infer_flags = [-1, -1, -1] + self.out = self.input[1, 0, 2:4, :] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +# Situation 6: starts(tensor), ends(list, have tensor) +# without attr(decrease) +class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + + ends_tensor = [] + for index, ele in enumerate(self.ends): + ends_tensor.append(("y" + str(index), np.ones( + (1)).astype('int32') * ele)) + + self.inputs = { + 'Input': self.input, + "StartsTensor": np.array( + self.starts, dtype="int32"), + 'EndsTensorList': ends_tensor + } + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + #'starts': self.starts, + 'ends': self.ends_infer, + 'infer_flags': self.infer_flags + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float64") + self.starts = [1, 0, 2] + self.ends = [3, 3, 4] + self.axes = [0, 1, 2] + self.infer_flags = [-1, -1, -1] + self.out = self.input[1:3, 0:3, 2:4, :] + + self.ends_infer = [-1, 3, 4] + + def test_check_output(self): + 
self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out', max_relative_error=0.006) + + +# Test CUDA float16 +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFP16(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = {'Input': self.input} + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts, + 'ends': self.ends, + 'infer_flags': self.infer_flags + } + + def config(self): + self.dtype = "float16" + self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 3] + self.out = self.input[-3:3, 0:100, :, 2:-1] + self.infer_flags = [1, 1, 1] + + def test_check_output(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-5) + + def test_check_grad_normal(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['Input'], 'Out', max_relative_error=0.006) + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFP16_2(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = {'Input': self.input} + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts, + 'ends': self.ends, + 'infer_flags': self.infer_flags + } + + def config(self): + self.dtype = "float16" + self.input = np.random.random([3, 4, 10]).astype(self.dtype) + self.starts = [0] + self.ends = [1] + self.axes = [1] + self.out = self.input[:, 0:1, :] + self.infer_flags = [1] + + def test_check_output(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-5) + + def test_check_grad_normal(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['Input'], + 'Out', + max_relative_error=0.006, + numeric_grad_delta=0.5) + + +class TestBF16(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = {'Input': convert_float_to_uint16(self.input)} + self.outputs = {'Out': convert_float_to_uint16(self.out)} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts, + 'ends': self.ends, + 'infer_flags': self.infer_flags + } + + def config(self): + self.dtype = np.uint16 + self.input = np.random.random([3, 4, 5, 6]).astype(np.float32) + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 3] + self.out = self.input[-3:3, 0:100, :, 2:-1] + self.infer_flags = [1, 1, 1] + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['Input'], 'Out') + + +# Test python API +class TestSliceAPI(unittest.TestCase): + def test_1(self): + input = np.random.random([3, 4, 5, 6]).astype("float64") + minus_1 = fluid.layers.fill_constant([1], "int32", -1) + minus_3 = fluid.layers.fill_constant([1], "int64", -3) + starts = fluid.layers.data( + name='starts', shape=[1, 3], append_batch_size=False) + ends = fluid.layers.data( + name='ends', shape=[3], append_batch_size=False) + + x = fluid.layers.data( + name="x", + shape=[3, 4, 5, 6], + append_batch_size=False, + dtype="float64") + + # value_int64 is greater than 2147483647 which is the max of int32 + value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648) + + out_1 = fluid.layers.slice( + x, axes=[0, 1, 2], starts=[-3, 0, 2], 
ends=[value_int64, 100, -1]) + out_2 = fluid.layers.slice( + x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]) + out_3 = fluid.layers.slice( + x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]) + out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) + + out_5 = x[-3:3, 0:100, 2:-1] + out_6 = x[minus_3:3, 0:100, :, 2:-1] + out_7 = x[minus_1, 0:100, :, 2:minus_1] + + exe = fluid.Executor(place=fluid.CPUPlace()) + res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( + fluid.default_main_program(), + feed={ + "x": input, + 'starts': np.array([-3, 0, 2]).astype("int32"), + 'ends': np.array([3, 100, -1]).astype("int32") + }, + fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) + + assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) + assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) + assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) + assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) + assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :]) + assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) + assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) + + +class TestSliceApiWithTensor(unittest.TestCase): + def test_starts_ends_is_tensor(self): + with paddle.fluid.dygraph.guard(): + a = paddle.rand(shape=[4, 5, 6], dtype='float32') + axes = [0, 1, 2] + starts = [-3, 0, 2] + ends = [3, 2, 4] + a_1 = paddle.slice( + a, + axes=axes, + starts=paddle.to_tensor( + starts, dtype='int32'), + ends=paddle.to_tensor( + ends, dtype='int32')) + a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) + + self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) + + def test_bool_tensor(self): + with paddle.fluid.dygraph.guard(): + array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') + tt = paddle.to_tensor(array) + tt.stop_gradient = False + + starts = [0, 1, 2] + ends = [3, 5, 4] + axes = [0, 1, 2] + + y_paddle = paddle.slice(tt, axes, starts, ends) + y_np = tt[0:3, 1:5, 2:4] + + self.assertTrue(paddle.bool == y_paddle.dtype) + self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) + + +class TestSliceApiWithLoDTensorArray(unittest.TestCase): + def setUp(self): + self.shape = (3, 4) + self.data = np.random.random(size=self.shape).astype('float32') + self.idx = 0 + self.start = 0 + self.end = 2 + self.axis = 1 + + self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda( + ) else fluid.CPUPlace() + self.exe = fluid.Executor(self.place) + + def set_program_and_run(self, main_program, case_num): + with fluid.program_guard(main_program): + x = [ + fluid.data( + name='x0', shape=self.shape, dtype="float32"), fluid.data( + name='x1', shape=self.shape, dtype="float32"), + fluid.data( + name='x2', shape=self.shape, dtype="float32") + ] + + for each_x in x: + each_x.stop_gradient = False + + arr = layers.create_array(dtype="float32") + for i in range(3): + idx = layers.array_length(arr) + arr = layers.array_write(x=x[i], i=idx, array=arr) + + if case_num == 1: + self.sliced_arr = output = arr[0] + + elif case_num == 2: + end = fluid.layers.array_length( + arr) - 1 # dtype of end is int64 + self.sliced_arr = slice_arr = arr[self.start:end] + output, _ = fluid.layers.tensor_array_to_tensor( + slice_arr, axis=self.axis, use_stack=True) + elif case_num == 3: + value_int64 = fluid.layers.fill_constant([1], "int64", + 2147483648) + self.sliced_arr = slice_arr = arr[self.start:value_int64] + output, _ = fluid.layers.tensor_array_to_tensor( + slice_arr, axis=self.axis, use_stack=True) + + loss = 
fluid.layers.reduce_sum(output) + fluid.backward.append_backward(loss) + g_vars = list( + map(main_program.global_block().var, + [each_x.name + "@GRAD" for each_x in x])) + self.out, self.g_x0, self.g_x1, self.g_x2 = \ + self.exe.run(main_program, + feed = {'x0': self.data, + 'x1': self.data, + 'x2': self.data}, + fetch_list=[output] + g_vars) + + def test_case_1(self): + main_program = fluid.Program() + self.set_program_and_run(main_program, 1) + + self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) + self.assertEqual(self.sliced_arr.shape, self.shape) + self.assertTrue(np.array_equal(self.out, self.data)) + self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) + self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data))) + self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + + def test_case_2(self): + main_program = fluid.Program() + self.set_program_and_run(main_program, 2) + + self.assertTrue( + self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) + self.assertEqual(self.sliced_arr.shape, self.shape) + self.assertTrue( + np.array_equal( + self.out, np.stack( + [self.data, self.data], axis=self.axis))) + self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) + self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) + self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) + + def test_case_3(self): + main_program = fluid.Program() + self.set_program_and_run(main_program, 3) + + self.assertTrue( + self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) + self.assertEqual(self.sliced_arr.shape, self.shape) + self.assertTrue( + np.array_equal( + self.out, + np.stack( + [self.data, self.data, self.data], axis=self.axis))) + self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) + self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) + self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) + + +class TestImperativeVarBaseGetItem(unittest.TestCase): + def test_getitem_with_long(self): + with fluid.dygraph.guard(): + data = np.random.random((2, 80, 16128)).astype('float32') + var = fluid.dygraph.to_variable(data) + sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here + self.assertEqual(sliced.shape, [2, 70, 80]) + + sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]] + self.assertEqual(sliced.shape, [2, 78, 78]) + + def test_getitem_with_float(self): + def test_float_in_slice_item(): + with fluid.dygraph.guard(): + data = np.random.random((2, 80, 16128)).astype('float32') + var = fluid.dygraph.to_variable(data) + sliced = var[:, 1.1:, :var.shape[1]] + + self.assertRaises(Exception, test_float_in_slice_item) + + def test_float_in_index(): + with fluid.dygraph.guard(): + data = np.random.random((2, 80, 16128)).astype('float32') + var = fluid.dygraph.to_variable(data) + sliced = var[1.1] + + self.assertRaises(Exception, test_float_in_index) + + +class TestInferShape(unittest.TestCase): + def test(self): + x = paddle.ones(shape=[3, 4, 5]) + x.desc.set_shape([3, -1, 5]) + self.assertEqual(x.shape, (3, -1, 5)) + + out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) + self.assertEqual(out0.shape, (3, 3, 5)) + + def test_axis_less_than_zero(self): + + # Using paddle.disable_static will make other unittests fail. 
+ with fluid.dygraph.guard(): + x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) + x = paddle.to_tensor(x_arr) + + pp_slice = paddle.slice(x, [100, ], [0], [1]) + np_slice = x_arr[:, :, 0:1] + self.assertTrue(np.array_equal(pp_slice, np_slice)) + + pp_slice = paddle.slice(x, (-100, ), [0], [1]) + np_slice = x_arr[0:1] + self.assertTrue(np.array_equal(pp_slice, np_slice)) + + x_arr = np.array([], dtype=np.float32) + x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) + + starts = paddle.to_tensor( + np.reshape( + np.array( + [], dtype=np.int32), (0, ))) + ends = paddle.to_tensor( + np.reshape( + np.array( + [], dtype=np.int32), (0, ))) + + with self.assertRaises(ValueError): + paddle.slice(x, [-1000000], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, [1000000], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, [], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, 0, starts, ends) + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestImperativeCUDAPinnedInput(unittest.TestCase): + def test_input_cuda_pinned_var(self): + with fluid.dygraph.guard(): + data = np.random.random((2, 80, 16128)).astype('float32') + var = core.VarBase( + value=data, + name='', + persistable=False, + place=fluid.CUDAPinnedPlace(), + zero_copy=False) + sliced = var[:, 10:, :var.shape[1]] + self.assertEqual(sliced.shape, [2, 70, 80]) + if __name__ == '__main__': paddle.enable_static() -- GitLab
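The branching added in SliceOpArgumentMapping / SliceGradOpArgumentMapping above mirrors the three ways `starts`/`ends` can reach the slice op from the Python API, which is also what the re-enabled unit tests exercise (plain attribute lists, a single StartsTensor/EndsTensor input, or a StartsTensorList/EndsTensorList of per-axis tensors). A minimal sketch of the three call styles, assuming a Paddle build with this change applied; the shapes and values are arbitrary and taken from the tests above:

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_tensor(np.random.random([3, 4, 5, 6]).astype("float32"))

    # 1. Plain Python ints -> the "starts"/"ends" attribute path of the kernel signature.
    out_attrs = paddle.slice(x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 100, -1])

    # 2. A single integer tensor -> the StartsTensor / EndsTensor inputs.
    starts_t = paddle.to_tensor(np.array([-3, 0, 2], dtype="int32"))
    ends_t = paddle.to_tensor(np.array([3, 100, -1], dtype="int32"))
    out_tensors = paddle.slice(x, axes=[0, 1, 2], starts=starts_t, ends=ends_t)

    # 3. A list mixing ints and 1-D tensors -> the StartsTensorList / EndsTensorList inputs.
    minus_3 = paddle.to_tensor(np.array([-3], dtype="int32"))
    out_list = paddle.slice(x, axes=[0, 1, 2], starts=[minus_3, 0, 2], ends=[3, 100, -1])

    # All three paths should select the same region.
    assert np.array_equal(out_attrs.numpy(), out_tensors.numpy())
    assert np.array_equal(out_attrs.numpy(), out_list.numpy())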