提交 03b31c6c 编写于 作者: liuruilong

format files

上级 03954e0c
......@@ -77,7 +77,8 @@ static const std::string G_OP_TYPE_BATCHNORM = "batch_norm";
static const std::string G_OP_TYPE_BOX_CODER = "box_coder";
static const std::string G_OP_TYPE_CONCAT = "concat";
static const std::string G_OP_TYPE_ELEMENTWISE_ADD = "elementwise_add";
static const std::string G_OP_TYPE_FUSION_CONV_ADD_RELU = "fusion_conv_add_relu";
static const std::string G_OP_TYPE_FUSION_CONV_ADD_RELU =
"fusion_conv_add_relu";
static const std::string G_OP_TYPE_FC = "fc";
static const std::string G_OP_TYPE_LRN = "lrn";
static const std::string G_OP_TYPE_MUL = "mul";
......
......@@ -51,9 +51,9 @@ bool Node::CanSplit(std::unordered_set<std::string> complex_compute_set) {
return split;
}
void Node::CanSplit(bool *split, bool spliting,
int complex_count,
std::unordered_set<std::string> *complex_compute_set, Node *pre_node) {
void Node::CanSplit(bool *split, bool spliting, int complex_count,
std::unordered_set<std::string> *complex_compute_set,
Node *pre_node) {
if (spliting) {
if (complex_compute_set->find(this->type_) != complex_compute_set->end()) {
complex_count++;
......
......@@ -16,9 +16,9 @@ limitations under the License. */
#include <map>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include <unordered_set>
#include "common/log.h"
#include "framework/paddle_mobile_object.h"
......@@ -51,9 +51,9 @@ class Node : PaddleMobileObject {
void Description();
private:
void CanSplit(bool *split, bool spliting,
int complex_count,
std::unordered_set<std::string> *complex_compute_set, Node *pre_node);
void CanSplit(bool *split, bool spliting, int complex_count,
std::unordered_set<std::string> *complex_compute_set,
Node *pre_node);
void OpDescs(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
Node *node, bool adding_thread, int thread_num);
void OpDescs(uint size,
......
......@@ -99,7 +99,8 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
// DLOG << "node: \n" << *begin_node;
std::vector<std::shared_ptr<framework::OpDesc>> op_descs;
// bool can_splite = begin_node->CanSplit({G_OP_TYPE_CONV, G_OP_TYPE_BATCHNORM, G_OP_TYPE_DEPTHWISE_CONV});
// bool can_splite = begin_node->CanSplit({G_OP_TYPE_CONV,
// G_OP_TYPE_BATCHNORM, G_OP_TYPE_DEPTHWISE_CONV});
GenerateOps(&op_descs, begin_node.get());
block->ops_ = op_descs;
}
......@@ -112,11 +113,9 @@ std::shared_ptr<ProgramDesc> ProgramOptimize::FushionOptimize(
return optimize_program;
}
void ProgramOptimize::GenerateOps(
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
Node *current_node) {
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
Node *current_node) {
if (current_node->inputs_.size() > 1 &&
input_node != current_node->inputs_.back()) {
return;
......@@ -131,7 +130,6 @@ void ProgramOptimize::GenerateOps(
auto &output = current_node->outputs_[i];
GenerateOps(op_desc, current_node, output.get());
}
}
void ProgramOptimize::GenerateOps(
......
......@@ -35,9 +35,8 @@ class ProgramOptimize {
std::vector<std::shared_ptr<BlockDesc>> new_blocks_;
void GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_descs,
Node *begin_node);
void GenerateOps(
std::vector<std::shared_ptr<framework::OpDesc>> *op_desc, Node *input_node,
Node *current_node);
void GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
Node *input_node, Node *current_node);
void GenerateOps(std::vector<std::shared_ptr<framework::OpDesc>> *op_desc,
Node *input_node, Node *current_node, bool adding_thread,
int thread_num, std::shared_ptr<BlockDesc> new_block);
......
......@@ -232,7 +232,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
originProgramDesc->Description("program: ");
}
paddle_mobile__framework__proto__program_desc__free_unpacked(c_program, NULL);
return program;
}
......
......@@ -32,7 +32,7 @@ class FushionConvAddReluOpMatcher : public framework::FusionOpMatcher {
std::vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
{{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
}
std::string Type() { return G_OP_TYPE_FUSION_CONV_ADD_RELU; }
};
......
......@@ -32,11 +32,11 @@ class FusionFcMatcher : public framework::FusionOpMatcher {
node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD);
}
void FolderNodes(framework::Node *node) {
void FolderNodes(framework::Node *node) {
vector<std::shared_ptr<framework::OpDesc>> origin_descs =
node->OpDescs(node_.Depth());
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
{{G_OP_TYPE_ELEMENTWISE_ADD, {"Y", "Z"}}});
}
std::string Type() { return G_OP_TYPE_FC; }
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册