未验证 提交 493ea2ca 编写于 作者: myq406450149's avatar myq406450149 提交者: GitHub

split op: support the split parameter as a tensor or a tensor list, test=develop (#2474)

* split op upgrade
上级 a5e93766
...@@ -42,5 +42,9 @@ void SplitCompute::Run() { ...@@ -42,5 +42,9 @@ void SplitCompute::Run() {
// Register the ARM / float / NCHW implementation of the `split` op.
// In addition to the data input "X", the op may receive its split
// configuration at runtime rather than as attributes:
//   - "AxisTensor":         int32 tensor holding the axis to split along.
//   - "SectionsTensorList": list of int32 tensors, each giving the size of
//                           one output section along the split axis.
// Both are bound with PRECISION(kInt32) so type checking rejects
// non-integer configuration tensors.
REGISTER_LITE_KERNEL(
    split, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::SplitCompute, def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("AxisTensor",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
    .BindInput("SectionsTensorList",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
    .Finalize();
...@@ -344,6 +344,9 @@ struct DropoutParam { ...@@ -344,6 +344,9 @@ struct DropoutParam {
struct SplitParam { struct SplitParam {
lite::Tensor* x{}; lite::Tensor* x{};
std::vector<lite::Tensor*> output{}; std::vector<lite::Tensor*> output{};
lite::Tensor* axis_tensor;
std::vector<lite::Tensor*> sections_tensor_list{};
int axis{-1}; int axis{-1};
int num{0}; int num{0};
std::vector<int> sections; std::vector<int> sections;
......
...@@ -39,8 +39,16 @@ bool SplitOp::InferShape() const { ...@@ -39,8 +39,16 @@ bool SplitOp::InferShape() const {
const int outs_number = outs.size(); const int outs_number = outs.size();
std::vector<lite::DDim> outs_dims; std::vector<lite::DDim> outs_dims;
outs_dims.reserve(outs_number); outs_dims.reserve(outs_number);
std::vector<lite::Tensor *> sections_tensor_list_ =
if (num > 0) { param_.sections_tensor_list;
if (sections.size() > 0 && sections_tensor_list_.size() > 0) {
std::vector<int> vec_sections;
for (size_t i = 0; i < sections_tensor_list_.size(); ++i) {
auto dim = in_dims;
dim[axis] = sections_tensor_list_[i]->data<int>()[0];
outs_dims.push_back(dim);
}
} else if (num > 0) {
int out_axis_dim = in_dims[axis] / num; int out_axis_dim = in_dims[axis] / num;
for (int i = 0; i < outs_number; ++i) { for (int i = 0; i < outs_number; ++i) {
auto dim = in_dims; auto dim = in_dims;
...@@ -55,6 +63,10 @@ bool SplitOp::InferShape() const { ...@@ -55,6 +63,10 @@ bool SplitOp::InferShape() const {
} }
} }
if (param_.axis_tensor != nullptr) {
axis = param_.axis_tensor->data<int>()[0];
}
for (int j = 0; j < outs_dims.size(); ++j) { for (int j = 0; j < outs_dims.size(); ++j) {
outs[j]->Resize(outs_dims[j]); outs[j]->Resize(outs_dims[j]);
} }
...@@ -73,6 +85,21 @@ bool SplitOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) { ...@@ -73,6 +85,21 @@ bool SplitOp::AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) {
for (auto var : outs) { for (auto var : outs) {
param_.output.push_back(scope->FindVar(var)->GetMutable<lite::Tensor>()); param_.output.push_back(scope->FindVar(var)->GetMutable<lite::Tensor>());
} }
std::vector<std::string> input_arg_names = opdesc.InputArgumentNames();
if (std::find(input_arg_names.begin(), input_arg_names.end(), "AxisTensor") !=
input_arg_names.end()) {
auto args = opdesc.Input("AxisTensor");
auto *var = scope->FindVar(args.front());
param_.axis_tensor = var->GetMutable<lite::Tensor>();
}
if (std::find(input_arg_names.begin(),
input_arg_names.end(),
"SectionsTensorList") != input_arg_names.end()) {
auto args = opdesc.Input("SectionsTensorList");
auto *var = scope->FindVar(args.front());
param_.sections_tensor_list =
*(var->GetMutable<std::vector<lite::Tensor *>>());
}
return true; return true;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册