Commit 09b0c04d authored by liuruilong

generate graph

Parent f4126618
......@@ -51,8 +51,8 @@ paddle-mobile.cbp
.idea
compile_commands.json
cmake-build-debug/
test/models/
\ No newline at end of file
......@@ -34,15 +34,19 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
static std::unordered_map<std::string, std::vector<std::string>>
op_input_output_key = {
{"conv2d", {"Input", "Output"}}, {"relu", {"X", "Out"}},
{"softmax", {"X", "Out"}}, {"mul", {"X", "Out"}},
{"elementwise_add", {"X", "Out"}}, {"pool2d", {"X", "Out"}},
{"batch_norm", {"X", "Y"}}, {"lrn", {"X", "Out"}},
{"concat", {"X", "Out"}},
};
static std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
op_input_output_key = {{"conv2d", {{"Input"}, {"Output"}}},
{"relu", {{"X"}, {"Out"}}},
{"softmax", {{"X"}, {"Out"}}},
{"mul", {{"X"}, {"Out"}}},
{"elementwise_add", {{"X", "Y"}, {"Out"}}},
{"pool2d", {{"X"}, {"Out"}}},
{"batch_norm", {{"X"}, {"Y"}}},
{"lrn", {{"X"}, {"Out"}}},
{"concat", {{"X"}, {"Out"}}},
{"feed", {{"X"}, {"Out"}}},
{"fetch", {{"X"}, {"Out"}}}};
template <typename Dtype> class OperatorBase : PaddleMobileObject {
public:
......
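The reworked table maps each op type to a pair of lists: input parameter names first, output parameter names second, so passes can query both sides by name instead of indexing a flat vector. A minimal sketch of how a pass might consult such a registry (the helper and its name are illustrative, not part of this commit):

```cpp
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

using OpIOKey = std::pair<std::vector<std::string>, std::vector<std::string>>;

// Illustrative lookup: the registered input parameter names of an op type,
// or an empty list for unknown ops (mirroring the NULL check in
// ProgramOptimize::FushionOptimize below).
std::vector<std::string> InputKeysOf(
    const std::unordered_map<std::string, OpIOKey> &registry,
    const std::string &op_type) {
  auto it = registry.find(op_type);
  if (it == registry.end()) return {};
  return it->second.first;  // .second would give the output names
}
```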
......@@ -26,6 +26,8 @@ namespace framework {
Node &Node::operator>(std::shared_ptr<Node> node) {
outputs_.push_back(node);
std::shared_ptr<Node> this_node;
node->inputs_.push_back(this);
return *node;
}
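`operator>` pushes the right-hand node onto `outputs_`, records a raw back-pointer in its `inputs_`, and returns `*node`, so the next `>` continues from the node just linked. That is what lets the tests below build a whole pipeline in one expression. A short illustration (assuming the `Node(std::string)` constructor used in test_optimize.cpp):

```cpp
Node conv("conv2d");
auto add = std::make_shared<Node>("elementwise_add");
auto relu = std::make_shared<Node>("relu");

// Left-to-right: conv > add returns *add, which then links to relu.
conv > add > relu;  // builds conv2d -> elementwise_add -> relu
```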
......@@ -46,16 +48,23 @@ bool Node::operator==(const Node &in) {
return true;
}
std::string Node::ToString(std::string blank) const {
std::string Node::ToString(std::string blank, const Node *node) const {
std::stringstream ss;
ss << type_ << "-> \n";
if (inputs_.size() > 1 && node != inputs_.back()) {
return ss.str();
} else if (inputs_.size() > 1 && node == inputs_.back()) {
ss << "\n" << blank << type_ << "\n";
}
for (int i = 0; i < outputs_.size(); ++i) {
ss << blank << outputs_[i]->ToString(blank + " ") << "";
ss << blank << outputs_[i]->ToString(blank + " ", this) << "";
}
return ss.str();
}
std::string Node::ToString() const { return this->ToString(" "); }
std::string Node::ToString() const { return this->ToString(" ", this); }
Node &Node::To(int index) {
if (index == 0) {
......
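The new `const Node *node` parameter handles join nodes: a node with several inputs is visited once per incoming edge, so without a guard it would be printed once per edge. The check emits it only when the traversal arrives from `inputs_.back()`. A self-contained sketch of that guard with simplified types (not the committed class):

```cpp
#include <memory>
#include <string>
#include <vector>

struct N {
  std::string type;
  std::vector<std::shared_ptr<N>> outputs;
  std::vector<N *> inputs;

  std::string Str(const std::string &blank, const N *from) const {
    // A join node (more than one input) is expanded only when reached
    // from its last registered input; earlier arrivals print nothing.
    if (inputs.size() > 1 && from != inputs.back()) return "";
    std::string s = blank + type + "\n";
    for (const auto &o : outputs) s += o->Str(blank + "  ", this);
    return s;
  }
};
```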
......@@ -41,8 +41,9 @@ class Node : PaddleMobileObject {
private:
std::shared_ptr<OpDesc> op_desc_;
std::string ToString(std::string blank) const;
std::string ToString(std::string blank, const Node *node) const;
std::vector<std::shared_ptr<Node>> outputs_;
std::vector<Node *> inputs_;
std::string type_;
};
......
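Note the asymmetry between the two edge lists: `outputs_` owns its successors through `shared_ptr`, while the new `inputs_` only observes predecessors through raw pointers. Owning in both directions would create `shared_ptr` reference cycles, and no node would ever be destroyed. A minimal sketch of the scheme:

```cpp
#include <memory>
#include <vector>

struct GraphNode {
  std::vector<std::shared_ptr<GraphNode>> outputs;  // owning, downstream
  std::vector<GraphNode *> inputs;                  // non-owning, upstream
};

int main() {
  auto a = std::make_shared<GraphNode>();
  auto b = std::make_shared<GraphNode>();
  a->outputs.push_back(b);       // forward edge keeps b alive
  b->inputs.push_back(a.get());  // back edge adds no ownership
  return 0;                      // a is released, then b; no leaked cycle
}
```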
......@@ -21,29 +21,54 @@ SOFTWARE.
namespace paddle_mobile {
namespace framework {
std::shared_ptr<ProgramDesc> ProgramOptimize::Optimize() { return nullptr; }
std::shared_ptr<ProgramDesc>
ProgramOptimize::FushionOptimize(std::shared_ptr<ProgramDesc> ori_des) {
for (int i = 0; i < ori_des->Blocks().size(); ++i) {
std::unordered_map<std::string, std::shared_ptr<Node>> output_nodes;
std::shared_ptr<Node> begin_node;
auto block = ori_des->Block(i);
// DLOG << " ops size: " << block->Ops().size();
for (int j = 0; j < block->Ops().size(); ++j) {
auto op = block->Ops()[j];
auto op_type = op->Type();
// DLOG << "op type: " << op_type << " index: " << j;
if (op_input_output_key.find(op->Type()) ==
op_input_output_key.end()) {
return NULL;
}
std::shared_ptr<Node> node = std::make_shared<Node>(op);
auto op_outputs = op->Output(op_input_output_key.at(op->Type())[1]);
for (int k = 0; k < op_outputs.size(); ++k) {
output_nodes[op_outputs[k]] = node;
if (j == 0) {
begin_node = node;
}
auto input_keys = op_input_output_key.at(op->Type()).first;
for (auto input_key : input_keys) {
auto op_inputs = op->Input(input_key);
for (int l = 0; l < op_inputs.size(); ++l) {
std::string input_name = op_inputs[l];
if (output_nodes.find(input_name) != output_nodes.end()) {
auto input_node = output_nodes[input_name];
*input_node > node;
}
}
}
auto op_iutputs = op->Output(op_input_output_key.at(op->Type())[0]);
for (int l = 0; l < op_iutputs.size(); ++l) {
auto input_node = output_nodes[op_iutputs[l]];
*input_node > node;
auto output_keys = op_input_output_key.at(op_type).second;
for (auto output_key : output_keys) {
auto op_outputs = op->Output(output_key);
for (int k = 0; k < op_outputs.size(); ++k) {
output_nodes[op_outputs[k]] = node;
}
}
}
DLOG << output_nodes["feed"];
DLOG << "node: \n" << *begin_node;
}
return ori_des;
}
}
}
} // namespace framework
} // namespace paddle_mobile
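The rewritten loop is a single-pass graph construction: each op becomes a `Node`, `output_nodes` remembers which node produced every tensor name, incoming edges come from looking up each of the op's input names in that map, and the op's own output names then overwrite the map entries. The first op encountered is kept as `begin_node` and printed at the end. A condensed, self-contained version of the idea (simplified types; the commit reads the parameter names through `op_input_output_key`):

```cpp
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct OpInfo {
  std::string type;
  std::vector<std::string> inputs;   // tensor (argument) names consumed
  std::vector<std::string> outputs;  // tensor (argument) names produced
};

struct GNode {
  explicit GNode(const OpInfo &op) : type(op.type) {}
  std::string type;
  std::vector<std::shared_ptr<GNode>> outputs;
  std::vector<GNode *> inputs;
};

std::shared_ptr<GNode> BuildGraph(const std::vector<OpInfo> &ops) {
  std::unordered_map<std::string, std::shared_ptr<GNode>> producers;
  std::shared_ptr<GNode> begin;
  for (const auto &op : ops) {
    auto node = std::make_shared<GNode>(op);
    if (!begin) begin = node;            // first op roots the graph
    for (const auto &in : op.inputs) {   // link to each producer, if known
      auto it = producers.find(in);
      if (it != producers.end()) {
        it->second->outputs.push_back(node);
        node->inputs.push_back(it->second.get());
      }
    }
    for (const auto &out : op.outputs) { // register as the latest producer
      producers[out] = node;
    }
  }
  return begin;
}
```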
......@@ -33,9 +33,9 @@ class ProgramOptimize {
FushionOptimize(std::shared_ptr<ProgramDesc> ori_des);
private:
// std::shared_ptr<ProgramDesc> ori_desc_;
std::vector<std::unordered_map<std::string, std::shared_ptr<Node>>>
outputs_nodes_;
};
}
} // namespace framework
} // namespace paddle_mobile
......@@ -195,76 +195,55 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
LOG(kLOG_DEBUG) << "block: " << block.idx();
for (int j = 0; j < block.ops().size(); ++j) {
framework::proto::OpDesc op = block.ops()[j];
LOG(kLOG_DEBUG1) << " op: " << op.type();
LOG(kLOG_DEBUG1) << "op: " << op.type();
for (int m = 0; m < op.inputs_size(); ++m) {
const framework::proto::OpDesc::Var &var = op.inputs(m);
LOG(kLOG_DEBUG2) << " input parameter: " << var.parameter();
LOG(kLOG_DEBUG2) << "input parameter: " << var.parameter();
for (int n = 0; n < var.arguments().size(); ++n) {
LOG(kLOG_DEBUG3) << " argument - " << var.arguments()[n];
LOG(kLOG_DEBUG3) << "argument - " << var.arguments()[n];
}
}
for (int y = 0; y < op.outputs_size(); ++y) {
const framework::proto::OpDesc::Var &var = op.outputs(y);
LOG(kLOG_DEBUG2) << " out parameter: " << var.parameter();
LOG(kLOG_DEBUG2) << "out parameter: " << var.parameter();
for (int z = 0; z < var.arguments().size(); ++z) {
LOG(kLOG_DEBUG3) << " argument - " << var.arguments()[z];
LOG(kLOG_DEBUG3) << "argument - " << var.arguments()[z];
}
}
for (int x = 0; x < op.attrs().size(); ++x) {
const framework::proto::OpDesc_Attr attr = op.attrs()[x];
// std::cout << " attr name: " << attr.name() <<
// std::endl;
// std::cout << " attr type: " << attr.type() <<
// std::endl;
LOG(kLOG_DEBUG2) << "attr name: " << attr.name();
switch (attr.type()) {
case framework::proto::AttrType::BOOLEAN:
// std::cout << " boolen: " << attr.b() <<
// std::endl;
LOG(kLOG_DEBUG3) << "boolen: " << attr.b();
break;
case framework::proto::AttrType::INT:
// std::cout << " int: " << attr.i() <<
// std::endl;
LOG(kLOG_DEBUG3) << "int: " << attr.i();
break;
case framework::proto::AttrType::FLOAT:
// std::cout << " float: " << attr.f() <<
// std::endl;
LOG(kLOG_DEBUG3) << "float: " << attr.f();
case framework::proto::AttrType::STRING:
// std::cout << " string: " << attr.s() <<
// std::endl;
LOG(kLOG_DEBUG3) << "string: " << attr.s();
case framework::proto::AttrType::BOOLEANS:
// std::vector<bool>
// bools(attr.bools_size());
for (int y = 0; y < attr.bools_size(); ++y) {
// std::cout << " bool - " <<
// attr.bools(y) <<
// std::endl;
LOG(kLOG_DEBUG3) << "bools: " << attr.bools(y);
}
break;
case framework::proto::AttrType::LONG:
// std::cout << " long: " << attr.l() <<
// std::endl;
LOG(kLOG_DEBUG3) << "long: " << attr.l();
case framework::proto::AttrType::FLOATS:
for (int y = 0; y < attr.floats_size(); ++y) {
// std::cout << " float - " << y <<
// ": " <<
// attr.floats(y)
// << std::endl;
LOG(kLOG_DEBUG3) << "floats: " << attr.floats(y);
}
break;
case framework::proto::AttrType::INTS:
for (int y = 0; y < attr.ints_size(); ++y) {
// std::cout << " int - " << y << ":
// " <<
// attr.ints(y)
// << std::endl;
LOG(kLOG_DEBUG3) << "ints: " << attr.ints(y);
}
break;
case framework::proto::AttrType::STRINGS:
for (int y = 0; y < attr.strings_size(); ++y) {
// std::cout << " string - " << y <<
// ": " <<
// attr.strings(y)
// << std::endl;
LOG(kLOG_DEBUG3) << "strings: " << attr.strings(y);
}
break;
}
}
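Every case in a switch like this must end in `break`; otherwise a FLOAT attribute falls through and also runs the STRING, BOOLEANS, LONG, and remaining branches, logging spurious values. A tiny self-contained demonstration of the fall-through pitfall:

```cpp
#include <iostream>

int main() {
  int type = 2;  // say 2 stands for FLOAT
  switch (type) {
    case 2:
      std::cout << "float\n";   // no break: control falls through...
    case 3:
      std::cout << "string\n";  // ...so this prints as well
      break;
  }
  return 0;
}
```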
......@@ -273,19 +252,15 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
for (int k = 0; k < block.vars().size(); ++k) {
framework::proto::VarDesc var = block.vars()[k];
if (var.type().type() == framework::proto::VarType::LOD_TENSOR) {
// std::cout << " var name: " << var.name() <<
// std::endl;
LOG(kLOG_DEBUG1) << "var name: " << var.name();
const framework::proto::VarType::TensorDesc &tensor_desc =
var.type().lod_tensor().tensor();
// std::cout << " in var tensor desc dims size "
// << tensor_desc.dims().size() <<
// std::endl;
LOG(kLOG_DEBUG2) << "in var tensor desc dims size: "
<< tensor_desc.dims().size();
int memory_size = 1;
for (int l = 0; l < tensor_desc.dims().size(); ++l) {
// std::cout << " var tensor desc dim " << l
// << " value: " <<
// tensor_desc.dims()[l] <<
// std::endl;
LOG(kLOG_DEBUG3) << "var tensor desc dim " << l
<< " value: " << tensor_desc.dims()[l];
}
}
......
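`memory_size` starts at 1 and the loop above logs each dimension; the element count the loader needs is presumably the product of those dims (the accumulation sits in the elided part of the hunk). A standalone sketch of that product (hypothetical helper name):

```cpp
#include <cstdint>
#include <vector>

// Hypothetical standalone form of the memory_size accumulation: the number
// of elements in a LoD tensor is the product of its dims (assuming all dims
// are positive, i.e. no -1 batch placeholder).
int64_t ElementCount(const std::vector<int64_t> &dims) {
  int64_t memory_size = 1;
  for (int64_t d : dims) memory_size *= d;
  return memory_size;
}
```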
......@@ -15,7 +15,6 @@ target_link_libraries(test-log paddle-mobile)
ADD_EXECUTABLE(test-load framework/test_load.cpp)
target_link_libraries(test-load paddle-mobile)
# gen test log
ADD_EXECUTABLE(test-optimize framework/test_optimize.cpp)
target_link_libraries(test-optimize paddle-mobile)
\ No newline at end of file
target_link_libraries(test-optimize paddle-mobile)
......@@ -23,6 +23,6 @@ int main() {
//../../../test/models/googlenet
//../../../test/models/mobilenet
auto program = loader.Load(std::string("../../../test/models/googlenet"));
auto program = loader.Load(std::string("../models/googlenet"));
return 0;
}
\ No newline at end of file
......@@ -27,23 +27,14 @@ int main() {
Loader<paddle_mobile::CPU> loader;
// "../../../test/models/googlenet"
auto program = loader.Load("../models/googlenet");
auto program = loader.Load("../../../test/models/googlenet");
ProgramOptimize optimize;
optimize.FushionOptimize(program.originProgram);
auto optimize_program = optimize.FushionOptimize(program.originProgram);
if (optimize_program) {
Node node("conv");
node > std::make_shared<Node>("add") > std::make_shared<Node>("relu") >
std::make_shared<Node>("lrn");
node > std::make_shared<Node>("batch normal");
DLOG << "depath of node " << node.depth();
// Node node1("conv");
// node1 > Node("add") > Node("relu");
Node node2 = node.To(4);
DLOG << "\n" << node2;
} else {
DLOG << "optimize_program is null";
}
}