Commit deef88cb authored by liuruilong

format files

Parent b07f7b2d
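The hunks below are pure reformatting: long lines rewrapped to an 80-column limit, continuation indents normalized, and short function bodies collapsed onto one line. That layout matches what clang-format produces from a Google-based configuration; as a hedged sketch (the commit does not name the tool or its settings), a .clang-format file like the following would yield this style:

    # Assumed configuration -- not taken from the repository.
    BasedOnStyle: Google                    # 2-space indents, 80-column limit
    AllowShortFunctionsOnASingleLine: All   # e.g. Ops() const { return ops_; }

Running `clang-format -i` over the touched sources with such a file should reproduce the right-hand (+) side of each hunk, assuming this config.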
...
@@ -25,9 +25,7 @@ std::vector<std::shared_ptr<VarDesc>> BlockDesc::Vars() const {
   return res;
 }
 
-std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const {
-  return ops_;
-}
+std::vector<std::shared_ptr<OpDesc>> BlockDesc::Ops() const { return ops_; }
 
 BlockDesc::BlockDesc(PaddleMobile__Framework__Proto__BlockDesc *desc)
     : index_(desc->idx), parent_index_(desc->idx) {
...
...
@@ -132,8 +132,7 @@ void Node::OpDescs(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
     if (can_add_split) {
       adding_thread = true;
-      std::shared_ptr<OpDesc> split_op_desc =
-          std::make_shared<OpDesc>();
+      std::shared_ptr<OpDesc> split_op_desc = std::make_shared<OpDesc>();
       split_op_desc->type_ = G_OP_TYPE_SPLIT;
       auto outputs = this->op_desc_->Output(
           op_input_output_key[this->op_desc_->Type()].second[0]);
...
...
@@ -28,6 +28,7 @@ namespace framework {
 class Node : PaddleMobileObject {
   friend class ProgramOptimize;
+
  public:
   Node() {}
   explicit Node(const std::string &type) : type_(type) {}
...
...
@@ -21,9 +21,9 @@ namespace framework {
 std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
     std::shared_ptr<ProgramDesc> ori_des, bool add_split) {
   //  ProgramDesc *optimize_program = new ProgramDesc(*ori_des);
-  std::shared_ptr<ProgramDesc> optimize_program = std::make_shared<ProgramDesc>(*ori_des);
+  std::shared_ptr<ProgramDesc> optimize_program =
+      std::make_shared<ProgramDesc>(*ori_des);
   current_block_ = optimize_program->Blocks().size();
 
   for (int i = 0; i < optimize_program->Blocks().size(); ++i) {
...
@@ -98,7 +98,6 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
       //    DLOG << "node: \n" << *begin_node;
       std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
       GenerateOps(&op_descs, begin_node.get());
-
       block->ops_ = op_descs;
...
@@ -112,13 +111,10 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
   return optimize_program;
 }
 
-void ProgramOptimize::GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
-                                  Node *input_node,
-                                  Node *current_node,
-                                  bool adding_thread,
-                                  int thread_num,
-                                  std::shared_ptr<BlockDesc> new_block) {
+void ProgramOptimize::GenerateOps(
+    std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
+    Node *current_node, bool adding_thread, int thread_num,
+    std::shared_ptr<BlockDesc> new_block) {
   if (current_node->outputs_.size() > 1) {
     adding_thread = false;
   }
...
@@ -147,7 +143,7 @@ void ProgramOptimize::GenerateOps(
       // check whether this op already exists
      // check that the output and input keys each have size 1
       if (op_input_output_key.find(op_desc->type_) !=
-              op_input_output_key.end() &&
+          op_input_output_key.end() &&
           inputs_and_outputs.first.size() == 1 &&
           inputs_and_outputs.second.size() == 1) {
         auto inputs_of_output = op_desc->Input(inputs_and_outputs.first[0]);
...
@@ -172,9 +168,11 @@ void ProgramOptimize::GenerateOps(
       }
     }
 
-  if (current_node->inputs_.size() > 1 && input_node != current_node->inputs_.back()) {
+  if (current_node->inputs_.size() > 1 &&
+      input_node != current_node->inputs_.back()) {
     return;
-  } else if (current_node->inputs_.size() > 1 && input_node == current_node->inputs_.back()) {
+  } else if (current_node->inputs_.size() > 1 &&
+             input_node == current_node->inputs_.back()) {
     new_block.reset();
     adding_thread = false;
     op_desc->push_back(current_node->op_desc_);
...
@@ -191,8 +189,6 @@ void ProgramOptimize::GenerateOps(
       current_node->op_desc_->attrs_["thread"] = attr;
     }
-
-
   if (can_add_split) {
     new_block = std::make_shared<BlockDesc>();
     new_block->multi_thread_ = true;
...
@@ -200,15 +196,14 @@ void ProgramOptimize::GenerateOps(
     new_blocks_.push_back(new_block);
     adding_thread = true;
-    std::shared_ptr<OpDesc> split_op_desc =
-        std::make_shared<OpDesc>();
+    std::shared_ptr<OpDesc> split_op_desc = std::make_shared<OpDesc>();
     split_op_desc->type_ = G_OP_TYPE_SPLIT;
     auto outputs = current_node->op_desc_->Output(
         op_input_output_key[current_node->op_desc_->Type()].second[0]);
     split_op_desc->inputs_ = {
         {op_input_output_key[G_OP_TYPE_SPLIT].first[0], outputs}};
     auto &split_outputs =
         split_op_desc->outputs_[op_input_output_key[G_OP_TYPE_SPLIT].second[0]];
     for (const auto &output : current_node->outputs_) {
       split_outputs.push_back(outputs[0]);
     }
...
@@ -224,19 +219,21 @@ void ProgramOptimize::GenerateOps(
   for (int i = 0; i < current_node->outputs_.size(); ++i) {
     auto &output = current_node->outputs_[i];
     if (can_add_split) {
-      GenerateOps(op_desc, current_node, output.get(), adding_thread, i, new_block);
+      GenerateOps(op_desc, current_node, output.get(), adding_thread, i,
+                  new_block);
     } else {
-      GenerateOps(op_desc, current_node, output.get(), adding_thread, thread_num, new_block);
+      GenerateOps(op_desc, current_node, output.get(), adding_thread,
+                  thread_num, new_block);
     }
   }
 }
 
-void ProgramOptimize::GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_descs,
-                                  Node *begin_node) {
-  //std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
-  //  Node *input_node, Node *current_node, bool adding_thread, int thread_num
+void ProgramOptimize::GenerateOps(
+    std::vector<std::shared_ptr<framework::OpDesc>> *op_descs,
+    Node *begin_node) {
+  //  std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
+  //  Node *input_node, Node *current_node, bool adding_thread, int
+  //  thread_num
   this->GenerateOps(op_descs, begin_node, begin_node, false, -1, nullptr);
 }
...
...
@@ -37,11 +37,8 @@ class ProgramOptimize {
   void GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_descs,
                    Node *begin_node);
   void GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
-                   Node *input_node,
-                   Node *current_node,
-                   bool adding_thread,
-                   int thread_num,
-                   std::shared_ptr<BlockDesc> new_block);
+                   Node *input_node, Node *current_node, bool adding_thread,
+                   int thread_num, std::shared_ptr<BlockDesc> new_block);
 };
 
 } // namespace framework
 } // namespace paddle_mobile
...
@@ -75,7 +75,6 @@ void ProgramDesc::Description(std::string header) {
   }
 
   for (const auto &block : this->blocks_) {
   }
-
 #endif
 }
...