Commit 586671ea authored by phlrain

fix error

Parent: d35f5882
...@@ -28,10 +28,103 @@ using Variable = framework::Variable;
using LoDTensorArray = framework::LoDTensorArray;
using DDim = framework::DDim;
inline void DealTensorArray(const framework::ExecutionContext& ctx,
const std::vector<int64_t>& starts,
const std::vector<int64_t>& ends,
bool out_is_array) {
auto in_array = ctx.Input<LoDTensorArray>("Input");
// If the input is LoDTensorArray, the rank of input is 1.
int64_t in_size = in_array->size();
int64_t start = starts[0] < 0 ? (starts[0] + in_size) : starts[0];
int64_t end = ends[0] < 0 ? (ends[0] + in_size) : ends[0];
start = std::max(start, static_cast<int64_t>(0));
end = std::max(end, static_cast<int64_t>(0));
end = std::min(end, in_size);
if (starts[0] == -1 && end == 0) {
end = start + 1;
}
PADDLE_ENFORCE_GT(end, start,
platform::errors::InvalidArgument(
"Attr(ends) should be greater than attr(starts) in "
"slice op. But received end = %d, start = %d.",
ends[0], starts[0]));
int64_t out_size = end - start;
if (out_is_array) {
auto out_array = ctx.Output<LoDTensorArray>("Out");
out_array->resize(out_size);
for (int i = 0; i < out_size; ++i) {
auto* out_tensor = &out_array->at(i);
auto in_tensor = in_array->at(i + start);
out_tensor->set_lod(in_tensor.lod());
if (in_tensor.memory_size() > 0) {
paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out_tensor);
} else {
VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so "
"nothing has been written to output array["
<< i << "].";
}
}
} else {
auto out = ctx.Output<Tensor>("Out");
auto in_tensor = in_array->at(start);
paddle::framework::TensorCopy(in_tensor, ctx.GetPlace(), out);
}
}
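For readers following the index arithmetic in DealTensorArray above: negative starts/ends wrap around the array length and the resulting range is clamped to [0, in_size] before the PADDLE_ENFORCE_GT check. Below is a minimal, standalone sketch of the same arithmetic; the helper name is hypothetical and the snippet is not part of the commit.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>

// Hypothetical helper mirroring the clamping in DealTensorArray: negative
// indices wrap by the array size, then the range is clipped to [0, size].
std::pair<int64_t, int64_t> NormalizeSliceRange(int64_t raw_start,
                                                int64_t raw_end,
                                                int64_t size) {
  int64_t start = raw_start < 0 ? raw_start + size : raw_start;
  int64_t end = raw_end < 0 ? raw_end + size : raw_end;
  start = std::max<int64_t>(start, 0);
  end = std::min(std::max<int64_t>(end, 0), size);
  if (raw_start == -1 && end == 0) end = start + 1;  // same special case as above
  return {start, end};
}

int main() {
  // Slicing a 5-entry array with starts=[-3], ends=[-1] keeps entries 2 and 3.
  auto range = NormalizeSliceRange(-3, -1, 5);
  std::cout << range.first << " " << range.second << std::endl;  // prints: 2 4
}
```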
template <typename DeviceContext, typename T>
class SliceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
const Variable* input_var = ctx.InputVar("Input");
Variable* out_var = ctx.OutputVar("Out");
bool input_is_array = input_var->IsType<LoDTensorArray>();
bool out_is_array = out_var->IsType<LoDTensorArray>();
auto axes_int = ctx.Attr<std::vector<int>>("axes");
auto starts_int = ctx.Attr<std::vector<int>>("starts");
auto ends_int = ctx.Attr<std::vector<int>>("ends");
std::vector<int64_t> axes(axes_int.begin(), axes_int.end());
std::vector<int64_t> starts(starts_int.begin(), starts_int.end());
std::vector<int64_t> ends(ends_int.begin(), ends_int.end());
auto decrease_axis = ctx.Attr<std::vector<int>>("decrease_axis");
auto infer_flags = ctx.Attr<std::vector<int>>("infer_flags");
// Step 1: Get the accurate attribute value of starts and ends
auto starts_tensor_list = ctx.MultiInput<Tensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) {
starts = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int64_t>(starts_tensor_list);
}
auto ends_tensor_list = ctx.MultiInput<Tensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int64_t>(ends_tensor_list);
}
PADDLE_ENFORCE_EQ(
starts.size(), axes.size(),
platform::errors::InvalidArgument(
"The size of starts must be equal to the size of axes."));
PADDLE_ENFORCE_EQ(
ends.size(), axes.size(),
platform::errors::InvalidArgument(
"The size of ends must be equal to the size of axes."));
// Step 2: Compute output
if (input_is_array) {
DealTensorArray(ctx, starts, ends, out_is_array);
return;
}
}
 private:
};
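Step 1 above resolves starts/ends with a fixed precedence: a "StartsTensor"/"EndsTensor" input overrides a "StartsTensorList"/"EndsTensorList" input, which in turn overrides the static attribute. A hedged, standalone sketch of that precedence (illustrative types and names only, not the Paddle API):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Illustrative only: a runtime tensor value wins over a tensor list, which
// wins over the attribute value, matching the order of the checks above.
std::vector<int64_t> ResolveStarts(
    const std::optional<std::vector<int64_t>>& starts_tensor,
    const std::vector<std::vector<int64_t>>& starts_tensor_list,
    const std::vector<int64_t>& starts_attr) {
  if (starts_tensor.has_value()) return *starts_tensor;
  if (!starts_tensor_list.empty()) {
    std::vector<int64_t> merged;
    for (const auto& piece : starts_tensor_list)
      merged.insert(merged.end(), piece.begin(), piece.end());
    return merged;
  }
  return starts_attr;
}

int main() {
  // No StartsTensor, a three-element StartsTensorList, attribute is ignored.
  auto starts = ResolveStarts(std::nullopt, {{1}, {0}, {2}}, {9, 9, 9});
  std::cout << starts[0] << " " << starts[1] << " " << starts[2]
            << std::endl;  // prints: 1 0 2
}
```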
...@@ -39,7 +132,73 @@ class SliceKernel : public framework::OpKernel<T> {
template <typename DeviceContext, typename T>
class SliceGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
auto axes = ctx.Attr<std::vector<int>>("axes");
auto starts_int = ctx.Attr<std::vector<int>>("starts");
auto ends_int = ctx.Attr<std::vector<int>>("ends");
std::vector<int64_t> starts(starts_int.begin(), starts_int.end());
std::vector<int64_t> ends(ends_int.begin(), ends_int.end());
// Get the accurate attribute value of starts and ends
auto starts_tensor_list = ctx.MultiInput<Tensor>("StartsTensorList");
if (ctx.HasInput("StartsTensor")) {
starts = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("StartsTensor"));
} else if (starts_tensor_list.size() > 0) {
starts = GetDataFromTensorList<int64_t>(starts_tensor_list);
}
auto ends_tensor_list = ctx.MultiInput<Tensor>("EndsTensorList");
if (ctx.HasInput("EndsTensor")) {
ends = GetDataFromTensor<int64_t>(ctx.Input<Tensor>("EndsTensor"));
} else if (ends_tensor_list.size() > 0) {
ends = GetDataFromTensorList<int64_t>(ends_tensor_list);
}
Variable* d_input_var = ctx.OutputVar(framework::GradVarName("Input"));
const Variable* d_out_var = ctx.InputVar(framework::GradVarName("Out"));
bool d_input_is_array = d_input_var->IsType<LoDTensorArray>();
bool d_out_is_array = d_out_var->IsType<LoDTensorArray>();
if (d_input_is_array) {
auto* input_array = ctx.Input<LoDTensorArray>("Input");
auto* d_in_arr =
ctx.Output<LoDTensorArray>(framework::GradVarName("Input"));
int64_t d_in_size = input_array->size();
d_in_arr->resize(d_in_size);
// If the input is LoDTensorArray, the rank of input is 1.
// So only use the 0th element of starts.
int64_t start = starts[0] < 0 ? (starts[0] + d_in_size) : starts[0];
start = std::max(start, static_cast<int64_t>(0));
// set zero
platform::DeviceContextPool& pool =
platform::DeviceContextPool::Instance();
auto& dev_ctx = *pool.Get(ctx.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> functor;
for (int i = 0; i < d_in_size; ++i) {
auto dim = input_array->at(i).dims();
d_in_arr->at(i).Resize(dim);
d_in_arr->at(i).mutable_data<T>(ctx.GetPlace());
functor(reinterpret_cast<const DeviceContext&>(dev_ctx),
&d_in_arr->at(i), static_cast<T>(0));
}
if (d_out_is_array) {
auto* d_out_arr =
ctx.Input<LoDTensorArray>(framework::GradVarName("Out"));
int d_out_size = d_out_arr->size();
for (int i = 0; i < d_out_size; ++i) {
paddle::framework::TensorCopy(d_out_arr->at(i), ctx.GetPlace(),
&(d_in_arr->at(start + i)));
}
} else {
auto* d_out = ctx.Input<Tensor>(framework::GradVarName("Out"));
paddle::framework::TensorCopy(*d_out, ctx.GetPlace(),
&(d_in_arr->at(start)));
}
return;
}
}
 private:
};
...
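In the LoDTensorArray branch of SliceGradKernel above, the input gradient gets one zero-filled entry per input entry, and the incoming out-grad entries are copied back starting at the normalized start index. A minimal standalone sketch of that layout, using std::vector stand-ins for tensors (SetConstant and TensorCopy do the real work in the kernel):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Each inner vector stands in for one tensor in the array.
std::vector<std::vector<float>> ScatterSliceGrad(
    const std::vector<std::vector<float>>& out_grad,  // gradients of the slice
    const std::vector<int64_t>& input_entry_sizes,    // numel of each input entry
    int64_t start) {                                   // normalized start index
  std::vector<std::vector<float>> in_grad(input_entry_sizes.size());
  for (size_t i = 0; i < in_grad.size(); ++i)
    in_grad[i].assign(input_entry_sizes[i], 0.0f);     // zero-fill every entry
  for (size_t i = 0; i < out_grad.size(); ++i)
    in_grad[start + i] = out_grad[i];                  // copy slice grads back
  return in_grad;
}

int main() {
  // A 4-entry input sliced at [1, 3): only entries 1 and 2 receive gradient.
  auto g = ScatterSliceGrad({{1.f, 1.f}, {2.f, 2.f}}, {2, 2, 2, 2}, 1);
  std::cout << g[0][0] << " " << g[1][0] << " " << g[2][0] << " " << g[3][0]
            << std::endl;  // prints: 0 1 2 0
}
```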
...@@ -29,4 +29,5 @@ PD_REGISTER_KERNEL(slice_grad,
                   double,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>,
                   phi::dtype::bfloat16,
                   phi::dtype::float16) {}
...@@ -30,6 +30,8 @@ void LaunchEigenPadding(
    const DDim& out_dims,
    const Eigen::array<std::pair<int64_t, int64_t>, D>& paddings) {
  auto& place = *context.template eigen_device();
  LOG(ERROR) << D << "\t" << in_dims;
  LOG(ERROR) << out_dims;
  auto d_in_t = EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
      *d_input, in_dims);
  auto d_out_t = EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
...@@ -150,12 +152,12 @@ void EigenPaddingCompute(
      // the second dimension do not need padding, set padding[1] zero
      reshaped_padding[1].first = reshaped_padding[1].second = 0;

      LaunchEigenPadding<T, Context, 2>(context,
                                        d_input,
                                        reshaped_in_dims,
                                        d_out,
                                        reshaped_out_dims,
                                        reshaped_padding);
    } else {
      // other dimension need padding
      // reshape the dimension of tensor in 3:
...@@ -190,12 +192,13 @@ void EigenPaddingCompute(
      // the third dimension do not need padding, set padding[2] zero
      reshaped_padding[2].first = reshaped_padding[2].second = 0;
      LOG(ERROR) << "run here";
      LaunchEigenPadding<T, Context, 3>(context,
                                        d_input,
                                        reshaped_in_dims,
                                        d_out,
                                        reshaped_out_dims,
                                        reshaped_padding);
    }
  } else {
    // need padding at many dimension, cannot reduce dimension
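In the hunks above, LaunchEigenPadding is now instantiated with an explicit rank template argument (2 or 3) for the reshaped tensors. A hedged, standalone illustration of why the rank has to be a compile-time constant when padding with Eigen's Tensor module (assumes the unsupported Eigen CXX11/Tensor headers are available; this is plain Eigen, not Paddle code):

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <cstdint>
#include <iostream>
#include <utility>

// pad() needs an array of (before, after) pairs whose length equals the
// tensor's static rank, so the rank cannot be a runtime value here.
template <int Rank>
Eigen::Tensor<float, Rank> PadWithZeros(
    const Eigen::Tensor<float, Rank>& in,
    const Eigen::array<std::pair<int64_t, int64_t>, Rank>& paddings) {
  Eigen::Tensor<float, Rank> out = in.pad(paddings);  // zero-pads every dim
  return out;
}

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setConstant(1.0f);
  Eigen::array<std::pair<int64_t, int64_t>, 2> pads = {{{1, 1}, {0, 2}}};
  Eigen::Tensor<float, 2> out = PadWithZeros<2>(t, pads);
  std::cout << out.dimension(0) << "x" << out.dimension(1) << std::endl;  // 4x5
}
```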
...@@ -270,14 +273,18 @@ void SliceGradCompute(const Context& ctx,
template <typename T, typename Context>
void SliceGradRawKernel(const Context& ctx,
                        const DenseTensor& input,
                        const DenseTensor& out_grad,
                        const std::vector<int64_t>& axes,
                        const ScalarArray& starts_arr,
                        const ScalarArray& ends_arr,
                        const std::vector<int64_t>& infer_flags,
                        const std::vector<int64_t>& decrease_axis,
                        DenseTensor* input_grad) {
  size_t rank = input.dims().size();
  auto& starts = starts_arr.GetData();
  auto& ends = ends_arr.GetData();
  switch (rank) {
    case 1:
...
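SliceGradRawKernel above now takes the forward input (so the rank comes from input rather than out_grad) and receives starts/ends as ScalarArray values whose resolved contents are read through GetData(). As a hedged sketch of the idea behind such a wrapper, here is a stand-in class rather than the real phi::ScalarArray:

```cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in only: the point is that callers can construct the argument from a
// static attribute or from runtime tensors, while the kernel just calls
// GetData() to obtain the resolved integer values.
class FakeScalarArray {
 public:
  explicit FakeScalarArray(std::vector<int64_t> data) : data_(std::move(data)) {}
  const std::vector<int64_t>& GetData() const { return data_; }

 private:
  std::vector<int64_t> data_;
};

int main() {
  FakeScalarArray starts({1, 0, 2});
  std::cout << starts.GetData()[2] << std::endl;  // prints: 2
}
```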
...@@ -110,13 +110,16 @@ template <typename T, typename Context>
void SliceRawKernel(const Context& ctx,
                    const DenseTensor& input,
                    const std::vector<int64_t>& axes,
                    const ScalarArray& starts_arr,
                    const ScalarArray& ends_arr,
                    const std::vector<int64_t>& infer_flags,
                    const std::vector<int64_t>& decrease_axis,
                    DenseTensor* out) {
  int rank = input.dims().size();
  auto& starts = starts_arr.GetData();
  auto& ends = ends_arr.GetData();
  switch (rank) {
    case 1:
      SliceCompute<T, Context, 1>(
...
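Both raw kernels convert the runtime rank into a compile-time template argument through a switch, so the Eigen-based compute path sees a static rank. A minimal standalone sketch of that dispatch pattern (illustrative names only):

```cpp
#include <iostream>
#include <stdexcept>

template <int Rank>
void ComputeWithStaticRank() {
  // Stand-in for SliceCompute<T, Context, Rank>(...).
  std::cout << "computing with static rank " << Rank << std::endl;
}

void Dispatch(int rank) {
  switch (rank) {
    case 1: ComputeWithStaticRank<1>(); break;
    case 2: ComputeWithStaticRank<2>(); break;
    case 3: ComputeWithStaticRank<3>(); break;
    case 4: ComputeWithStaticRank<4>(); break;
    default: throw std::runtime_error("unsupported rank");
  }
}

int main() { Dispatch(3); }
```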
...@@ -14,16 +14,18 @@
#pragma once

#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void SliceGradRawKernel(const Context& ctx,
                        const DenseTensor& input,
                        const DenseTensor& out_grad,
                        const std::vector<int64_t>& axes,
                        const ScalarArray& starts,
                        const ScalarArray& ends,
                        const std::vector<int64_t>& infer_flags,
                        const std::vector<int64_t>& decrease_axis,
                        DenseTensor* input_grad);
...
...@@ -14,6 +14,7 @@
#pragma once

#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {
...@@ -22,8 +23,8 @@ template <typename T, typename Context>
void SliceRawKernel(const Context& ctx,
                    const DenseTensor& input,
                    const std::vector<int64_t>& axes,
                    const ScalarArray& starts,
                    const ScalarArray& ends,
                    const std::vector<int64_t>& infer_flags,
                    const std::vector<int64_t>& decrease_axis,
                    DenseTensor* out);
...
...@@ -17,19 +17,155 @@
namespace phi {

KernelSignature SliceOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.HasInput("StartsTensor")) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature("slice",
                             {"Input"},
                             {"axes",
                              "StartsTensor",
                              "EndsTensor",
                              "infer_flags",
                              "decrease_axis"},
                             {"Out"});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature("slice",
{"Input"},
{"axes",
"StartsTensor",
"EndsTensorList",
"infer_flags",
"decrease_axis"},
{"Out"});
} else {
return KernelSignature(
"slice",
{"Input"},
{"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"},
{"Out"});
}
} else if (ctx.InputSize("StartsTensorList") > 0) {
if (ctx.HasInput("EndsTensor")) {
return KernelSignature("slice",
{"Input"},
{"axes",
"StartsTensorList",
"EndsTensor",
"infer_flags",
"decrease_axis"},
{"Out"});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature("slice",
{"Input"},
{"axes",
"StartsTensorList",
"EndsTensorList",
"infer_flags",
"decrease_axis"},
{"Out"});
} else {
return KernelSignature(
"slice",
{"Input"},
{"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"},
{"Out"});
}
} else {
if (ctx.HasInput("EndsTensor")) {
return KernelSignature(
"slice",
{"Input"},
{"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"},
{"Out"});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature(
"slice",
{"Input"},
{"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"},
{"Out"});
} else {
return KernelSignature(
"slice",
{"Input"},
{"axes", "starts", "ends", "infer_flags", "decrease_axis"},
{"Out"});
}
}
}
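Every branch in SliceOpArgumentMapping above follows the same rule: prefer the runtime "StartsTensor"/"EndsTensor" input, then the "StartsTensorList"/"EndsTensorList" input, and fall back to the "starts"/"ends" attribute; the mapping enumerates all nine combinations explicitly because the kernel signature needs literal argument names. A hedged sketch of that selection rule (illustrative helper, not part of the commit):

```cpp
#include <iostream>
#include <string>

std::string PickArgName(bool has_tensor, bool has_tensor_list,
                        const std::string& tensor_name,
                        const std::string& tensor_list_name,
                        const std::string& attr_name) {
  if (has_tensor) return tensor_name;            // e.g. "StartsTensor"
  if (has_tensor_list) return tensor_list_name;  // e.g. "StartsTensorList"
  return attr_name;                              // e.g. "starts"
}

int main() {
  std::cout << PickArgName(false, true, "StartsTensor", "StartsTensorList",
                           "starts")
            << std::endl;  // prints: StartsTensorList
}
```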
KernelSignature SliceGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.HasInput("StartsTensor")) {
    if (ctx.HasInput("EndsTensor")) {
      return KernelSignature("slice_grad",
                             {"Input", GradVarName("Out")},
                             {"axes",
                              "StartsTensor",
                              "EndsTensor",
                              "infer_flags",
                              "decrease_axis"},
                             {GradVarName("Input")});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature("slice_grad",
{"Input", GradVarName("Out")},
{"axes",
"StartsTensor",
"EndsTensorList",
"infer_flags",
"decrease_axis"},
{GradVarName("Input")});
} else {
return KernelSignature(
"slice_grad",
{"Input", GradVarName("Out")},
{"axes", "StartsTensor", "ends", "infer_flags", "decrease_axis"},
{GradVarName("Input")});
}
} else if (ctx.InputSize("StartsTensorList") > 0) {
if (ctx.HasInput("EndsTensor")) {
return KernelSignature("slice_grad",
{"Input", GradVarName("Out")},
{"axes",
"StartsTensorList",
"EndsTensor",
"infer_flags",
"decrease_axis"},
{GradVarName("Input")});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature("slice_grad",
{"Input", GradVarName("Out")},
{"axes",
"StartsTensorList",
"EndsTensorList",
"infer_flags",
"decrease_axis"},
{GradVarName("Input")});
} else {
return KernelSignature(
"slice_grad",
{"Input", GradVarName("Out")},
{"axes", "StartsTensorList", "ends", "infer_flags", "decrease_axis"},
{GradVarName("Input")});
}
} else {
if (ctx.HasInput("EndsTensor")) {
return KernelSignature(
"slice_grad",
{"Input", GradVarName("Out")},
{"axes", "starts", "EndsTensor", "infer_flags", "decrease_axis"},
{GradVarName("Input")});
} else if (ctx.InputSize("EndsTensorList") > 0) {
return KernelSignature(
"slice_grad",
{"Input", GradVarName("Out")},
{"axes", "starts", "EndsTensorList", "infer_flags", "decrease_axis"},
{GradVarName("Input")});
} else {
return KernelSignature(
"slice_grad",
{"Input", GradVarName("Out")},
{"axes", "starts", "ends", "infer_flags", "decrease_axis"},
{GradVarName("Input")});
}
}
}

} // namespace phi
...
...@@ -14,6 +14,7 @@
#pragma once

#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {
...@@ -22,8 +23,8 @@ template <typename T, typename Context>
void SliceRawKernel(const Context& ctx,
                    const DenseTensor& input,
                    const std::vector<int64_t>& axes,
                    const ScalarArray& starts,
                    const ScalarArray& ends,
                    const std::vector<int64_t>& infer_flags,
                    const std::vector<int64_t>& decrease_axis,
                    DenseTensor* out);
...
...@@ -55,721 +55,746 @@ class TestSliceOp(OpTest):
        self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# class TestCase1(TestSliceOp): class TestCase1(TestSliceOp):
# def config(self): def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-3, 0, 2] self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1] self.ends = [3, 100, -1]
# self.axes = [0, 1, 2] self.axes = [0, 1, 2]
# self.infer_flags = [1, 1, 1] self.infer_flags = [1, 1, 1]
# self.out = self.input[-3:3, 0:100, 2:-1, :] self.out = self.input[-3:3, 0:100, 2:-1, :]
# class TestCase2(TestSliceOp):
# def config(self): class TestCase2(TestSliceOp):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") def config(self):
# self.starts = [-3, 0, 2] self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.ends = [3, 100, -1] self.starts = [-3, 0, 2]
# self.axes = [0, 1, 3] self.ends = [3, 100, -1]
# self.infer_flags = [1, 1, 1] self.axes = [0, 1, 3]
# self.out = self.input[-3:3, 0:100, :, 2:-1] self.infer_flags = [1, 1, 1]
self.out = self.input[-3:3, 0:100, :, 2:-1]
# # 1.2 with attr(decrease)
# class TestSliceOp_decs_dim(OpTest):
# def setUp(self): # 1.2 with attr(decrease)
# self.op_type = "slice" class TestSliceOp_decs_dim(OpTest):
# self.config() def setUp(self):
# self.inputs = {'Input': self.input} self.op_type = "slice"
# self.outputs = {'Out': self.out} self.config()
# self.attrs = { self.inputs = {'Input': self.input}
# 'axes': self.axes, self.outputs = {'Out': self.out}
# 'starts': self.starts, self.attrs = {
# 'ends': self.ends, 'axes': self.axes,
# 'infer_flags': self.infer_flags, 'starts': self.starts,
# 'decrease_axis': self.decrease_axis, 'ends': self.ends,
# } 'infer_flags': self.infer_flags,
'decrease_axis': self.decrease_axis,
# def config(self): }
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2] def config(self):
# self.ends = [2, 3, 4] self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.axes = [0, 1, 2] self.starts = [1, 0, 2]
# self.decrease_axis = [0] self.ends = [2, 3, 4]
# self.infer_flags = [1, 1, 1] self.axes = [0, 1, 2]
# self.out = self.input[1, 0:3, 2:4, :] self.decrease_axis = [0]
self.infer_flags = [1, 1, 1]
# def test_check_output(self): self.out = self.input[1, 0:3, 2:4, :]
# self.check_output()
# def test_check_output(self):
# def test_check_grad_normal(self): # self.check_output()
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
def test_check_grad_normal(self):
# class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim): print(self.input.size)
# def config(self): self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [1, 0, 2]
# self.ends = [2, 1, 4] class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
# self.axes = [0, 1, 2] def config(self):
# self.decrease_axis = [0, 1] self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.infer_flags = [1, 1, 1] self.starts = [1, 0, 2]
# self.out = self.input[1, 0, 2:4, :] self.ends = [2, 1, 4]
self.axes = [0, 1, 2]
# class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim): self.decrease_axis = [0, 1]
# def config(self): self.infer_flags = [1, 1, 1]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.out = self.input[1, 0, 2:4, :]
# self.starts = [-1, 0, 2]
# self.ends = [1000000, 1, 4]
# self.axes = [0, 1, 2] class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
# self.decrease_axis = [0, 1] def config(self):
# self.infer_flags = [1, 1, 1] self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.out = self.input[-1, 0, 2:4, :] self.starts = [-1, 0, 2]
self.ends = [1000000, 1, 4]
# class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim): self.axes = [0, 1, 2]
# def config(self): self.decrease_axis = [0, 1]
# self.input = np.random.random([3, 4, 5, 7]).astype("float64") self.infer_flags = [1, 1, 1]
# self.starts = [0, 1, 2, 3] self.out = self.input[-1, 0, 2:4, :]
# self.ends = [1, 2, 3, 4]
# self.axes = [0, 1, 2, 3]
# self.decrease_axis = [0, 1, 2, 3] class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
# self.infer_flags = [1, 1, 1] def config(self):
# self.out = self.input[0, 1, 2, 3:4] self.input = np.random.random([3, 4, 5, 7]).astype("float64")
self.starts = [0, 1, 2, 3]
# class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim): self.ends = [1, 2, 3, 4]
# def config(self): self.axes = [0, 1, 2, 3]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.decrease_axis = [0, 1, 2, 3]
# self.starts = [-1] self.infer_flags = [1, 1, 1]
# self.ends = [1000000] self.out = self.input[0, 1, 2, 3:4]
# self.axes = [3]
# self.decrease_axis = [3]
# self.infer_flags = [1, 1, 1] class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
# self.out = self.input[:, :, :, -1] def config(self):
self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim): self.starts = [-1]
# def config(self): self.ends = [1000000]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.axes = [3]
# self.starts = [0, 1, 2, 3] self.decrease_axis = [3]
# self.ends = [1, 2, 3, 4] self.infer_flags = [1, 1, 1]
# self.axes = [0, 1, 2, 3] self.out = self.input[:, :, :, -1]
# self.decrease_axis = [0, 1, 2, 3]
# self.infer_flags = [1, 1, 1]
# self.out = self.input[0, 1, 2, 3:4] class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
def config(self):
# # Situation 2: starts(list, have tensor), ends(list, no tensor) self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# # without attr(decrease) self.starts = [0, 1, 2, 3]
# class TestSliceOp_starts_ListTensor(OpTest): self.ends = [1, 2, 3, 4]
# def setUp(self): self.axes = [0, 1, 2, 3]
# self.op_type = "slice" self.decrease_axis = [0, 1, 2, 3]
# self.config() self.infer_flags = [1, 1, 1]
self.out = self.input[0, 1, 2, 3:4]
# starts_tensor = []
# for index, ele in enumerate(self.starts):
# starts_tensor.append(("x" + str(index), np.ones( # Situation 2: starts(list, have tensor), ends(list, no tensor)
# (1)).astype('int64') * ele)) # without attr(decrease)
class TestSliceOp_starts_ListTensor(OpTest):
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} def setUp(self):
# self.outputs = {'Out': self.out} self.op_type = "slice"
# self.attrs = { self.config()
# 'axes': self.axes,
# 'starts': self.starts_infer, starts_tensor = []
# 'ends': self.ends, for index, ele in enumerate(self.starts):
# 'infer_flags': self.infer_flags starts_tensor.append(("x" + str(index), np.ones(
# } (1)).astype('int64') * ele))
# def config(self): self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.outputs = {'Out': self.out}
# self.starts = [1, 0, 2] self.attrs = {
# self.ends = [3, 3, 4] 'axes': self.axes,
# self.axes = [0, 1, 2] 'starts': self.starts_infer,
# self.infer_flags = [-1, 1, -1] 'ends': self.ends,
# self.out = self.input[1:3, 0:3, 2:4, :] 'infer_flags': self.infer_flags
}
# self.starts_infer = [-1, 0, -1]
def config(self):
# def test_check_output(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.check_output() self.starts = [1, 0, 2]
self.ends = [3, 3, 4]
# def test_check_grad_normal(self): self.axes = [0, 1, 2]
# self.check_grad(['Input'], 'Out', max_relative_error=0.006) self.infer_flags = [-1, 1, -1]
self.out = self.input[1:3, 0:3, 2:4, :]
# # Situation 2: starts(list, have tensor), ends(list, no tensor)
# # with attr(decrease) self.starts_infer = [-1, 0, -1]
# class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
# def setUp(self): def test_check_output(self):
# self.op_type = "slice" self.check_output()
# self.config()
def test_check_grad_normal(self):
# starts_tensor = [] self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# for index, ele in enumerate(self.starts):
# starts_tensor.append(("x" + str(index), np.ones(
# (1)).astype('int32') * ele)) # Situation 2: starts(list, have tensor), ends(list, no tensor)
# with attr(decrease)
# self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
def setUp(self):
# self.outputs = {'Out': self.out} self.op_type = "slice"
# self.attrs = { self.config()
# 'axes': self.axes,
# 'starts': self.starts_infer, starts_tensor = []
# 'ends': self.ends, for index, ele in enumerate(self.starts):
# 'infer_flags': self.infer_flags, starts_tensor.append(("x" + str(index), np.ones(
# 'decrease_axis': self.decrease_axis, (1)).astype('int32') * ele))
# }
self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.outputs = {'Out': self.out}
# self.starts = [1, 0, 2] self.attrs = {
# self.ends = [2, 3, 4] 'axes': self.axes,
# self.axes = [0, 1, 2] 'starts': self.starts_infer,
# self.decrease_axis = [0] 'ends': self.ends,
# self.infer_flags = [1, -1, 1] 'infer_flags': self.infer_flags,
# self.out = self.input[1, 0:3, 2:4, :] 'decrease_axis': self.decrease_axis,
}
# self.starts_infer = [1, -1, 2]
def config(self):
# def test_check_output(self): self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.check_output() self.starts = [1, 0, 2]
self.ends = [2, 3, 4]
# def test_check_grad_normal(self): self.axes = [0, 1, 2]
# self.check_grad(['Input'], 'Out', max_relative_error=0.006) self.decrease_axis = [0]
self.infer_flags = [1, -1, 1]
# class TestSliceOp_decs_dim_5_starts_ListTensor( self.out = self.input[1, 0:3, 2:4, :]
# TestSliceOp_decs_dim_starts_ListTensor):
# def config(self): self.starts_infer = [1, -1, 2]
# self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# self.starts = [-1] def test_check_output(self):
# self.ends = [1000000] self.check_output()
# self.axes = [3]
# self.decrease_axis = [3] def test_check_grad_normal(self):
# self.infer_flags = [-1] self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# self.out = self.input[:, :, :, -1]
# self.starts_infer = [-1] class TestSliceOp_decs_dim_5_starts_ListTensor(
TestSliceOp_decs_dim_starts_ListTensor):
# # Situation 3: starts(tensor), ends(list, no tensor) def config(self):
# # with attr(decrease) self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# class TestSliceOp_decs_dim_starts_OneTensor(OpTest): self.starts = [-1]
# def setUp(self): self.ends = [1000000]
# self.op_type = "slice" self.axes = [3]
# self.config() self.decrease_axis = [3]
# self.inputs = { self.infer_flags = [-1]
# 'Input': self.input, self.out = self.input[:, :, :, -1]
# "StartsTensor": np.array(
# self.starts, dtype="int32") self.starts_infer = [-1]
# }
# self.outputs = {'Out': self.out}
# self.attrs = { # Situation 3: starts(tensor), ends(list, no tensor)
# 'axes': self.axes, # with attr(decrease)
# #'starts': self.starts, class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
# 'ends': self.ends, def setUp(self):
# 'infer_flags': self.infer_flags, self.op_type = "slice"
# 'decrease_axis': self.decrease_axis, self.config()
# } self.inputs = {
'Input': self.input,
# def config(self): "StartsTensor": np.array(
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts, dtype="int32")
# self.starts = [1, 0, 2] }
# self.ends = [2, 3, 4] self.outputs = {'Out': self.out}
# self.axes = [0, 1, 2] self.attrs = {
# self.decrease_axis = [0] 'axes': self.axes,
# self.infer_flags = [-1, -1, -1] #'starts': self.starts,
# self.out = self.input[1, 0:3, 2:4, :] 'ends': self.ends,
'infer_flags': self.infer_flags,
# def test_check_output(self): 'decrease_axis': self.decrease_axis,
# self.check_output() }
# def test_check_grad_normal(self): def config(self):
# self.check_grad(['Input'], 'Out', max_relative_error=0.006) self.input = np.random.random([3, 4, 5, 6]).astype("float64")
self.starts = [1, 0, 2]
# # Situation 4: starts(tensor), ends(tensor) self.ends = [2, 3, 4]
# # without attr(decrease) self.axes = [0, 1, 2]
# class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest): self.decrease_axis = [0]
# def setUp(self): self.infer_flags = [-1, -1, -1]
# self.op_type = "slice" self.out = self.input[1, 0:3, 2:4, :]
# self.config()
def test_check_output(self):
# self.inputs = { self.check_output()
# 'Input': self.input,
# "StartsTensor": np.array( def test_check_grad_normal(self):
# self.starts, dtype="int64"), self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# "EndsTensor": np.array(
# self.ends, dtype="int32")
# } # Situation 4: starts(tensor), ends(tensor)
# self.outputs = {'Out': self.out} # without attr(decrease)
# self.attrs = { class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
# 'axes': self.axes, def setUp(self):
# #'starts': self.starts, self.op_type = "slice"
# #'ends': self.ends_infer, self.config()
# 'infer_flags': self.infer_flags
# } self.inputs = {
'Input': self.input,
# def config(self): "StartsTensor": np.array(
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts, dtype="int64"),
# self.starts = [1, 0, 2] "EndsTensor": np.array(
# self.ends = [3, 3, 4] self.ends, dtype="int32")
# self.axes = [0, 1, 2] }
# self.infer_flags = [-1, -1, -1] self.outputs = {'Out': self.out}
# self.out = self.input[1:3, 0:3, 2:4, :] self.attrs = {
'axes': self.axes,
# def test_check_output(self): #'starts': self.starts,
# self.check_output() #'ends': self.ends_infer,
'infer_flags': self.infer_flags
# def test_check_grad_normal(self): }
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
def config(self):
# # Situation 5: starts(tensor), ends(tensor) self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# # with attr(decrease) self.starts = [1, 0, 2]
# class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest): self.ends = [3, 3, 4]
# def setUp(self): self.axes = [0, 1, 2]
# self.op_type = "slice" self.infer_flags = [-1, -1, -1]
# self.config() self.out = self.input[1:3, 0:3, 2:4, :]
# self.inputs = {
# 'Input': self.input, def test_check_output(self):
# "StartsTensor": np.array( self.check_output()
# self.starts, dtype="int32"),
# "EndsTensor": np.array( def test_check_grad_normal(self):
# self.ends, dtype="int32") self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# }
# self.outputs = {'Out': self.out}
# self.attrs = { # Situation 5: starts(tensor), ends(tensor)
# 'axes': self.axes, # with attr(decrease)
# #'starts': self.starts, class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
# #'ends': self.ends, def setUp(self):
# 'infer_flags': self.infer_flags, self.op_type = "slice"
# 'decrease_axis': self.decrease_axis, self.config()
# } self.inputs = {
'Input': self.input,
# def config(self): "StartsTensor": np.array(
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.starts, dtype="int32"),
# self.starts = [1, 0, 2] "EndsTensor": np.array(
# self.ends = [2, 1, 4] self.ends, dtype="int32")
# self.axes = [0, 1, 2] }
# self.decrease_axis = [0, 1] self.outputs = {'Out': self.out}
# self.infer_flags = [-1, -1, -1] self.attrs = {
# self.out = self.input[1, 0, 2:4, :] 'axes': self.axes,
#'starts': self.starts,
# def test_check_output(self): #'ends': self.ends,
# self.check_output() 'infer_flags': self.infer_flags,
'decrease_axis': self.decrease_axis,
# def test_check_grad_normal(self): }
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
def config(self):
# # Situation 6: starts(tensor), ends(list, have tensor) self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# # without attr(decrease) self.starts = [1, 0, 2]
# class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): self.ends = [2, 1, 4]
# def setUp(self): self.axes = [0, 1, 2]
# self.op_type = "slice" self.decrease_axis = [0, 1]
# self.config() self.infer_flags = [-1, -1, -1]
self.out = self.input[1, 0, 2:4, :]
# ends_tensor = []
# for index, ele in enumerate(self.ends): def test_check_output(self):
# ends_tensor.append(("y" + str(index), np.ones( self.check_output()
# (1)).astype('int32') * ele))
def test_check_grad_normal(self):
# self.inputs = { self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# 'Input': self.input,
# "StartsTensor": np.array(
# self.starts, dtype="int32"), # Situation 6: starts(tensor), ends(list, have tensor)
# 'EndsTensorList': ends_tensor # without attr(decrease)
# } class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
# self.outputs = {'Out': self.out} def setUp(self):
# self.attrs = { self.op_type = "slice"
# 'axes': self.axes, self.config()
# #'starts': self.starts,
# 'ends': self.ends_infer, ends_tensor = []
# 'infer_flags': self.infer_flags for index, ele in enumerate(self.ends):
# } ends_tensor.append(("y" + str(index), np.ones(
(1)).astype('int32') * ele))
# def config(self):
# self.input = np.random.random([3, 4, 5, 6]).astype("float64") self.inputs = {
# self.starts = [1, 0, 2] 'Input': self.input,
# self.ends = [3, 3, 4] "StartsTensor": np.array(
# self.axes = [0, 1, 2] self.starts, dtype="int32"),
# self.infer_flags = [-1, -1, -1] 'EndsTensorList': ends_tensor
# self.out = self.input[1:3, 0:3, 2:4, :] }
self.outputs = {'Out': self.out}
# self.ends_infer = [-1, 3, 4] self.attrs = {
'axes': self.axes,
# def test_check_output(self): #'starts': self.starts,
# self.check_output() 'ends': self.ends_infer,
'infer_flags': self.infer_flags
# def test_check_grad_normal(self): }
# self.check_grad(['Input'], 'Out', max_relative_error=0.006)
def config(self):
# # Test CUDA float16 self.input = np.random.random([3, 4, 5, 6]).astype("float64")
# @unittest.skipIf(not core.is_compiled_with_cuda(), self.starts = [1, 0, 2]
# "core is not compiled with CUDA") self.ends = [3, 3, 4]
# class TestFP16(OpTest): self.axes = [0, 1, 2]
# def setUp(self): self.infer_flags = [-1, -1, -1]
# self.op_type = "slice" self.out = self.input[1:3, 0:3, 2:4, :]
# self.config()
# self.inputs = {'Input': self.input} self.ends_infer = [-1, 3, 4]
# self.outputs = {'Out': self.out}
# self.attrs = { def test_check_output(self):
# 'axes': self.axes, self.check_output()
# 'starts': self.starts,
# 'ends': self.ends, def test_check_grad_normal(self):
# 'infer_flags': self.infer_flags self.check_grad(['Input'], 'Out', max_relative_error=0.006)
# }
# def config(self): # Test CUDA float16
# self.dtype = "float16" @unittest.skipIf(not core.is_compiled_with_cuda(),
# self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype) "core is not compiled with CUDA")
# self.starts = [-3, 0, 2] class TestFP16(OpTest):
# self.ends = [3, 100, -1] def setUp(self):
# self.axes = [0, 1, 3] self.op_type = "slice"
# self.out = self.input[-3:3, 0:100, :, 2:-1] self.config()
# self.infer_flags = [1, 1, 1] self.inputs = {'Input': self.input}
self.outputs = {'Out': self.out}
# def test_check_output(self): self.attrs = {
# place = core.CUDAPlace(0) 'axes': self.axes,
# if core.is_float16_supported(place): 'starts': self.starts,
# self.check_output_with_place(place, atol=1e-5) 'ends': self.ends,
'infer_flags': self.infer_flags
# def test_check_grad_normal(self): }
# place = core.CUDAPlace(0)
# if core.is_float16_supported(place): def config(self):
# self.check_grad_with_place( self.dtype = "float16"
# place, ['Input'], 'Out', max_relative_error=0.006) self.input = np.random.random([3, 4, 5, 6]).astype(self.dtype)
self.starts = [-3, 0, 2]
# @unittest.skipIf(not core.is_compiled_with_cuda(), self.ends = [3, 100, -1]
# "core is not compiled with CUDA") self.axes = [0, 1, 3]
# class TestFP16_2(OpTest): self.out = self.input[-3:3, 0:100, :, 2:-1]
# def setUp(self): self.infer_flags = [1, 1, 1]
# self.op_type = "slice"
# self.config() def test_check_output(self):
# self.inputs = {'Input': self.input} place = core.CUDAPlace(0)
# self.outputs = {'Out': self.out} if core.is_float16_supported(place):
# self.attrs = { self.check_output_with_place(place, atol=1e-5)
# 'axes': self.axes,
# 'starts': self.starts, def test_check_grad_normal(self):
# 'ends': self.ends, place = core.CUDAPlace(0)
# 'infer_flags': self.infer_flags if core.is_float16_supported(place):
# } self.check_grad_with_place(
place, ['Input'], 'Out', max_relative_error=0.006)
# def config(self):
# self.dtype = "float16"
# self.input = np.random.random([3, 4, 10]).astype(self.dtype) @unittest.skipIf(not core.is_compiled_with_cuda(),
# self.starts = [0] "core is not compiled with CUDA")
# self.ends = [1] class TestFP16_2(OpTest):
# self.axes = [1] def setUp(self):
# self.out = self.input[:, 0:1, :] self.op_type = "slice"
# self.infer_flags = [1] self.config()
self.inputs = {'Input': self.input}
# def test_check_output(self): self.outputs = {'Out': self.out}
# place = core.CUDAPlace(0) self.attrs = {
# if core.is_float16_supported(place): 'axes': self.axes,
# self.check_output_with_place(place, atol=1e-5) 'starts': self.starts,
'ends': self.ends,
# def test_check_grad_normal(self): 'infer_flags': self.infer_flags
# place = core.CUDAPlace(0) }
# if core.is_float16_supported(place):
# self.check_grad_with_place( def config(self):
# place, ['Input'], self.dtype = "float16"
# 'Out', self.input = np.random.random([3, 4, 10]).astype(self.dtype)
# max_relative_error=0.006, self.starts = [0]
# numeric_grad_delta=0.5) self.ends = [1]
self.axes = [1]
# class TestBF16(OpTest): self.out = self.input[:, 0:1, :]
# def setUp(self): self.infer_flags = [1]
# self.op_type = "slice"
# self.config() def test_check_output(self):
# self.inputs = {'Input': convert_float_to_uint16(self.input)} place = core.CUDAPlace(0)
# self.outputs = {'Out': convert_float_to_uint16(self.out)} if core.is_float16_supported(place):
# self.attrs = { self.check_output_with_place(place, atol=1e-5)
# 'axes': self.axes,
# 'starts': self.starts, def test_check_grad_normal(self):
# 'ends': self.ends, place = core.CUDAPlace(0)
# 'infer_flags': self.infer_flags if core.is_float16_supported(place):
# } self.check_grad_with_place(
place, ['Input'],
# def config(self): 'Out',
# self.dtype = np.uint16 max_relative_error=0.006,
# self.input = np.random.random([3, 4, 5, 6]).astype(np.float32) numeric_grad_delta=0.5)
# self.starts = [-3, 0, 2]
# self.ends = [3, 100, -1]
# self.axes = [0, 1, 3] class TestBF16(OpTest):
# self.out = self.input[-3:3, 0:100, :, 2:-1] def setUp(self):
# self.infer_flags = [1, 1, 1] self.op_type = "slice"
self.config()
# def test_check_output(self): self.inputs = {'Input': convert_float_to_uint16(self.input)}
# self.check_output() self.outputs = {'Out': convert_float_to_uint16(self.out)}
self.attrs = {
# def test_check_grad_normal(self): 'axes': self.axes,
# self.check_grad(['Input'], 'Out') 'starts': self.starts,
'ends': self.ends,
# # Test python API 'infer_flags': self.infer_flags
# class TestSliceAPI(unittest.TestCase): }
# def test_1(self):
# input = np.random.random([3, 4, 5, 6]).astype("float64") def config(self):
# minus_1 = fluid.layers.fill_constant([1], "int32", -1) self.dtype = np.uint16
# minus_3 = fluid.layers.fill_constant([1], "int64", -3) self.input = np.random.random([3, 4, 5, 6]).astype(np.float32)
# starts = fluid.layers.data( self.starts = [-3, 0, 2]
# name='starts', shape=[1, 3], append_batch_size=False) self.ends = [3, 100, -1]
# ends = fluid.layers.data( self.axes = [0, 1, 3]
# name='ends', shape=[3], append_batch_size=False) self.out = self.input[-3:3, 0:100, :, 2:-1]
self.infer_flags = [1, 1, 1]
# x = fluid.layers.data(
# name="x", def test_check_output(self):
# shape=[3, 4, 5, 6], self.check_output()
# append_batch_size=False,
# dtype="float64") def test_check_grad_normal(self):
self.check_grad(['Input'], 'Out')
# # value_int64 is greater than 2147483647 which is the max of int32
# value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
# Test python API
# out_1 = fluid.layers.slice( class TestSliceAPI(unittest.TestCase):
# x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1]) def test_1(self):
# out_2 = fluid.layers.slice( input = np.random.random([3, 4, 5, 6]).astype("float64")
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1]) minus_1 = fluid.layers.fill_constant([1], "int32", -1)
# out_3 = fluid.layers.slice( minus_3 = fluid.layers.fill_constant([1], "int64", -3)
# x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1]) starts = fluid.layers.data(
# out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends) name='starts', shape=[1, 3], append_batch_size=False)
ends = fluid.layers.data(
# out_5 = x[-3:3, 0:100, 2:-1] name='ends', shape=[3], append_batch_size=False)
# out_6 = x[minus_3:3, 0:100, :, 2:-1]
# out_7 = x[minus_1, 0:100, :, 2:minus_1] x = fluid.layers.data(
name="x",
# exe = fluid.Executor(place=fluid.CPUPlace()) shape=[3, 4, 5, 6],
# res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( append_batch_size=False,
# fluid.default_main_program(), dtype="float64")
# feed={
# "x": input, # value_int64 is greater than 2147483647 which is the max of int32
# 'starts': np.array([-3, 0, 2]).astype("int32"), value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)
# 'ends': np.array([3, 100, -1]).astype("int32")
# }, out_1 = fluid.layers.slice(
# fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7]) x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
out_2 = fluid.layers.slice(
# assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :]) x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
# assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1]) out_3 = fluid.layers.slice(
# assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1]) x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
# assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :]) out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
# assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
# assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1]) out_5 = x[-3:3, 0:100, 2:-1]
# assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1]) out_6 = x[minus_3:3, 0:100, :, 2:-1]
out_7 = x[minus_1, 0:100, :, 2:minus_1]
# class TestSliceApiWithTensor(unittest.TestCase):
# def test_starts_ends_is_tensor(self): exe = fluid.Executor(place=fluid.CPUPlace())
# with paddle.fluid.dygraph.guard(): res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
# a = paddle.rand(shape=[4, 5, 6], dtype='float32') fluid.default_main_program(),
# axes = [0, 1, 2] feed={
# starts = [-3, 0, 2] "x": input,
# ends = [3, 2, 4] 'starts': np.array([-3, 0, 2]).astype("int32"),
# a_1 = paddle.slice( 'ends': np.array([3, 100, -1]).astype("int32")
# a, },
# axes=axes, fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7])
# starts=paddle.to_tensor(
# starts, dtype='int32'), assert np.array_equal(res_1, input[-3:3, 0:100, 2:-1, :])
# ends=paddle.to_tensor( assert np.array_equal(res_2, input[-3:3, 0:100, :, 2:-1])
# ends, dtype='int32')) assert np.array_equal(res_3, input[-3:3, 0:100, :, 2:-1])
# a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends) assert np.array_equal(res_4, input[-3:3, 0:100, 2:-1, :])
assert np.array_equal(res_5, input[-3:3, 0:100, 2:-1, :])
# self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy())) assert np.array_equal(res_6, input[-3:3, 0:100, :, 2:-1])
assert np.array_equal(res_7, input[-1, 0:100, :, 2:-1])
# def test_bool_tensor(self):
# with paddle.fluid.dygraph.guard():
# array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') class TestSliceApiWithTensor(unittest.TestCase):
# tt = paddle.to_tensor(array) def test_starts_ends_is_tensor(self):
# tt.stop_gradient = False with paddle.fluid.dygraph.guard():
a = paddle.rand(shape=[4, 5, 6], dtype='float32')
# starts = [0, 1, 2] axes = [0, 1, 2]
# ends = [3, 5, 4] starts = [-3, 0, 2]
# axes = [0, 1, 2] ends = [3, 2, 4]
a_1 = paddle.slice(
# y_paddle = paddle.slice(tt, axes, starts, ends) a,
# y_np = tt[0:3, 1:5, 2:4] axes=axes,
starts=paddle.to_tensor(
# self.assertTrue(paddle.bool == y_paddle.dtype) starts, dtype='int32'),
# self.assertTrue(np.array_equal(y_paddle.numpy(), y_np)) ends=paddle.to_tensor(
ends, dtype='int32'))
# class TestSliceApiWithLoDTensorArray(unittest.TestCase): a_2 = paddle.slice(a, axes=axes, starts=starts, ends=ends)
# def setUp(self):
# self.shape = (3, 4) self.assertTrue(np.array_equal(a_1.numpy(), a_2.numpy()))
# self.data = np.random.random(size=self.shape).astype('float32')
# self.idx = 0 def test_bool_tensor(self):
# self.start = 0 with paddle.fluid.dygraph.guard():
# self.end = 2 array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool')
# self.axis = 1 tt = paddle.to_tensor(array)
tt.stop_gradient = False
# self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
# ) else fluid.CPUPlace() starts = [0, 1, 2]
# self.exe = fluid.Executor(self.place) ends = [3, 5, 4]
axes = [0, 1, 2]
# def set_program_and_run(self, main_program, case_num):
# with fluid.program_guard(main_program): y_paddle = paddle.slice(tt, axes, starts, ends)
# x = [ y_np = tt[0:3, 1:5, 2:4]
# fluid.data(
# name='x0', shape=self.shape, dtype="float32"), fluid.data( self.assertTrue(paddle.bool == y_paddle.dtype)
# name='x1', shape=self.shape, dtype="float32"), self.assertTrue(np.array_equal(y_paddle.numpy(), y_np))
# fluid.data(
# name='x2', shape=self.shape, dtype="float32")
# ] class TestSliceApiWithLoDTensorArray(unittest.TestCase):
def setUp(self):
# for each_x in x: self.shape = (3, 4)
# each_x.stop_gradient = False self.data = np.random.random(size=self.shape).astype('float32')
self.idx = 0
# arr = layers.create_array(dtype="float32") self.start = 0
# for i in range(3): self.end = 2
# idx = layers.array_length(arr) self.axis = 1
# arr = layers.array_write(x=x[i], i=idx, array=arr)
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
# if case_num == 1: ) else fluid.CPUPlace()
# self.sliced_arr = output = arr[0] self.exe = fluid.Executor(self.place)
# elif case_num == 2: def set_program_and_run(self, main_program, case_num):
# end = fluid.layers.array_length( with fluid.program_guard(main_program):
# arr) - 1 # dtype of end is int64 x = [
# self.sliced_arr = slice_arr = arr[self.start:end] fluid.data(
# output, _ = fluid.layers.tensor_array_to_tensor( name='x0', shape=self.shape, dtype="float32"), fluid.data(
# slice_arr, axis=self.axis, use_stack=True) name='x1', shape=self.shape, dtype="float32"),
# elif case_num == 3: fluid.data(
# value_int64 = fluid.layers.fill_constant([1], "int64", name='x2', shape=self.shape, dtype="float32")
# 2147483648) ]
# self.sliced_arr = slice_arr = arr[self.start:value_int64]
# output, _ = fluid.layers.tensor_array_to_tensor( for each_x in x:
# slice_arr, axis=self.axis, use_stack=True) each_x.stop_gradient = False
# loss = fluid.layers.reduce_sum(output) arr = layers.create_array(dtype="float32")
# fluid.backward.append_backward(loss) for i in range(3):
# g_vars = list( idx = layers.array_length(arr)
# map(main_program.global_block().var, arr = layers.array_write(x=x[i], i=idx, array=arr)
# [each_x.name + "@GRAD" for each_x in x]))
# self.out, self.g_x0, self.g_x1, self.g_x2 = \ if case_num == 1:
# self.exe.run(main_program, self.sliced_arr = output = arr[0]
# feed = {'x0': self.data,
# 'x1': self.data, elif case_num == 2:
# 'x2': self.data}, end = fluid.layers.array_length(
# fetch_list=[output] + g_vars) arr) - 1 # dtype of end is int64
self.sliced_arr = slice_arr = arr[self.start:end]
# def test_case_1(self): output, _ = fluid.layers.tensor_array_to_tensor(
# main_program = fluid.Program() slice_arr, axis=self.axis, use_stack=True)
# self.set_program_and_run(main_program, 1) elif case_num == 3:
value_int64 = fluid.layers.fill_constant([1], "int64",
# self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) 2147483648)
# self.assertEqual(self.sliced_arr.shape, self.shape) self.sliced_arr = slice_arr = arr[self.start:value_int64]
# self.assertTrue(np.array_equal(self.out, self.data)) output, _ = fluid.layers.tensor_array_to_tensor(
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) slice_arr, axis=self.axis, use_stack=True)
# self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data))) loss = fluid.layers.reduce_sum(output)
fluid.backward.append_backward(loss)
# def test_case_2(self): g_vars = list(
# main_program = fluid.Program() map(main_program.global_block().var,
# self.set_program_and_run(main_program, 2) [each_x.name + "@GRAD" for each_x in x]))
self.out, self.g_x0, self.g_x1, self.g_x2 = \
# self.assertTrue( self.exe.run(main_program,
# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY) feed = {'x0': self.data,
# self.assertEqual(self.sliced_arr.shape, self.shape) 'x1': self.data,
# self.assertTrue( 'x2': self.data},
# np.array_equal( fetch_list=[output] + g_vars)
# self.out, np.stack(
# [self.data, self.data], axis=self.axis))) def test_case_1(self):
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) main_program = fluid.Program()
# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) self.set_program_and_run(main_program, 1)
# self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR)
# def test_case_3(self): self.assertEqual(self.sliced_arr.shape, self.shape)
# main_program = fluid.Program() self.assertTrue(np.array_equal(self.out, self.data))
# self.set_program_and_run(main_program, 3) self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
self.assertTrue(np.array_equal(self.g_x1, np.zeros_like(self.data)))
# self.assertTrue( self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
# self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
# self.assertEqual(self.sliced_arr.shape, self.shape) def test_case_2(self):
# self.assertTrue( main_program = fluid.Program()
# np.array_equal( self.set_program_and_run(main_program, 2)
# self.out,
# np.stack( self.assertTrue(
# [self.data, self.data, self.data], axis=self.axis))) self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
# self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data))) self.assertEqual(self.sliced_arr.shape, self.shape)
# self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data))) self.assertTrue(
# self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data))) np.array_equal(
self.out, np.stack(
# class TestImperativeVarBaseGetItem(unittest.TestCase): [self.data, self.data], axis=self.axis)))
# def test_getitem_with_long(self): self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
# with fluid.dygraph.guard(): self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
# data = np.random.random((2, 80, 16128)).astype('float32') self.assertTrue(np.array_equal(self.g_x2, np.zeros_like(self.data)))
# var = fluid.dygraph.to_variable(data)
# sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here def test_case_3(self):
# self.assertEqual(sliced.shape, [2, 70, 80]) main_program = fluid.Program()
self.set_program_and_run(main_program, 3)
# sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
# self.assertEqual(sliced.shape, [2, 78, 78]) self.assertTrue(
self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
# def test_getitem_with_float(self): self.assertEqual(self.sliced_arr.shape, self.shape)
# def test_float_in_slice_item(): self.assertTrue(
# with fluid.dygraph.guard(): np.array_equal(
# data = np.random.random((2, 80, 16128)).astype('float32') self.out,
# var = fluid.dygraph.to_variable(data) np.stack(
# sliced = var[:, 1.1:, :var.shape[1]] [self.data, self.data, self.data], axis=self.axis)))
self.assertTrue(np.array_equal(self.g_x0, np.ones_like(self.data)))
# self.assertRaises(Exception, test_float_in_slice_item) self.assertTrue(np.array_equal(self.g_x1, np.ones_like(self.data)))
self.assertTrue(np.array_equal(self.g_x2, np.ones_like(self.data)))
# def test_float_in_index():
# with fluid.dygraph.guard():
# data = np.random.random((2, 80, 16128)).astype('float32') class TestImperativeVarBaseGetItem(unittest.TestCase):
# var = fluid.dygraph.to_variable(data) def test_getitem_with_long(self):
# sliced = var[1.1] with fluid.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
# self.assertRaises(Exception, test_float_in_index) var = fluid.dygraph.to_variable(data)
sliced = var[:, 10:, :var.shape[1]] # var.shape[1] is 80L here
# class TestInferShape(unittest.TestCase): self.assertEqual(sliced.shape, [2, 70, 80])
# def test(self):
# x = paddle.ones(shape=[3, 4, 5]) sliced = var[:, var.shape[0]:, var.shape[0]:var.shape[1]]
# x.desc.set_shape([3, -1, 5]) self.assertEqual(sliced.shape, [2, 78, 78])
# self.assertEqual(x.shape, (3, -1, 5))
def test_getitem_with_float(self):
# out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) def test_float_in_slice_item():
# self.assertEqual(out0.shape, (3, 3, 5)) with fluid.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
# def test_axis_less_than_zero(self): var = fluid.dygraph.to_variable(data)
sliced = var[:, 1.1:, :var.shape[1]]
# # Using paddle.disable_static will make other unittests fail.
# with fluid.dygraph.guard(): self.assertRaises(Exception, test_float_in_slice_item)
# x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
# x = paddle.to_tensor(x_arr) def test_float_in_index():
with fluid.dygraph.guard():
# pp_slice = paddle.slice(x, [100, ], [0], [1]) data = np.random.random((2, 80, 16128)).astype('float32')
# np_slice = x_arr[:, :, 0:1] var = fluid.dygraph.to_variable(data)
# self.assertTrue(np.array_equal(pp_slice, np_slice)) sliced = var[1.1]
# pp_slice = paddle.slice(x, (-100, ), [0], [1]) self.assertRaises(Exception, test_float_in_index)
# np_slice = x_arr[0:1]
# self.assertTrue(np.array_equal(pp_slice, np_slice))
class TestInferShape(unittest.TestCase):
# x_arr = np.array([], dtype=np.float32) def test(self):
# x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) x = paddle.ones(shape=[3, 4, 5])
x.desc.set_shape([3, -1, 5])
# starts = paddle.to_tensor( self.assertEqual(x.shape, (3, -1, 5))
# np.reshape(
# np.array( out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3])
# [], dtype=np.int32), (0, ))) self.assertEqual(out0.shape, (3, 3, 5))
# ends = paddle.to_tensor(
# np.reshape( def test_axis_less_than_zero(self):
# np.array(
# [], dtype=np.int32), (0, ))) # Using paddle.disable_static will make other unittests fail.
with fluid.dygraph.guard():
# with self.assertRaises(ValueError): x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
# paddle.slice(x, [-1000000], starts, ends) x = paddle.to_tensor(x_arr)
# with self.assertRaises(ValueError): pp_slice = paddle.slice(x, [100, ], [0], [1])
# paddle.slice(x, [1000000], starts, ends) np_slice = x_arr[:, :, 0:1]
self.assertTrue(np.array_equal(pp_slice, np_slice))
# with self.assertRaises(ValueError):
# paddle.slice(x, [], starts, ends) pp_slice = paddle.slice(x, (-100, ), [0], [1])
np_slice = x_arr[0:1]
# with self.assertRaises(ValueError): self.assertTrue(np.array_equal(pp_slice, np_slice))
# paddle.slice(x, 0, starts, ends)
x_arr = np.array([], dtype=np.float32)
# @unittest.skipIf(not core.is_compiled_with_cuda(), x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0)))
# "core is not compiled with CUDA")
# class TestImperativeCUDAPinnedInput(unittest.TestCase): starts = paddle.to_tensor(
# def test_input_cuda_pinned_var(self): np.reshape(
# with fluid.dygraph.guard(): np.array(
# data = np.random.random((2, 80, 16128)).astype('float32') [], dtype=np.int32), (0, )))
# var = core.VarBase( ends = paddle.to_tensor(
# value=data, np.reshape(
# name='', np.array(
# persistable=False, [], dtype=np.int32), (0, )))
# place=fluid.CUDAPinnedPlace(),
# zero_copy=False) with self.assertRaises(ValueError):
# sliced = var[:, 10:, :var.shape[1]] paddle.slice(x, [-1000000], starts, ends)
# self.assertEqual(sliced.shape, [2, 70, 80])
with self.assertRaises(ValueError):
paddle.slice(x, [1000000], starts, ends)
with self.assertRaises(ValueError):
paddle.slice(x, [], starts, ends)
with self.assertRaises(ValueError):
paddle.slice(x, 0, starts, ends)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestImperativeCUDAPinnedInput(unittest.TestCase):
def test_input_cuda_pinned_var(self):
with fluid.dygraph.guard():
data = np.random.random((2, 80, 16128)).astype('float32')
var = core.VarBase(
value=data,
name='',
persistable=False,
place=fluid.CUDAPinnedPlace(),
zero_copy=False)
sliced = var[:, 10:, :var.shape[1]]
self.assertEqual(sliced.shape, [2, 70, 80])
if __name__ == '__main__':
    paddle.enable_static()
...