diff --git a/paddle/fluid/framework/paddle2cinn/cinn_cache_key.cc b/paddle/fluid/framework/paddle2cinn/cinn_cache_key.cc
index 923282c59e2d4aa35770b0f134137d1cfc2d24d2..368fb4a5fd8c96c03a52249b8243212380bc64ab 100644
--- a/paddle/fluid/framework/paddle2cinn/cinn_cache_key.cc
+++ b/paddle/fluid/framework/paddle2cinn/cinn_cache_key.cc
@@ -14,13 +14,16 @@
 #include "paddle/fluid/framework/paddle2cinn/cinn_cache_key.h"
 
+#include <functional>
+#include <map>
 #include <string>
+#include <unordered_map>
 
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/inference/analysis/dot.h"
 
 namespace paddle {
 namespace framework {
@@ -39,13 +42,42 @@ CinnCacheKey::CinnCacheKey(const ir::Graph& graph,
   this->SetKey(graph, input_shapes, arch_str);
 }
 
+size_t CinnCacheKey::HashGraph(const ir::Graph& graph) {
+  // Use Dot to generate a unique string representation of the graph
+  inference::analysis::Dot dot;
+  std::unordered_map<const ir::Node*, std::string> node2dot;
+  int id = 0;
+  // Create nodes
+  // Note: graph.Nodes() returns an unordered_set, so the same graph
+  // may be enumerated in a different order.
+  for (const ir::Node* n : graph.Nodes()) {
+    std::string node_id = std::to_string(id++);
+    dot.AddNode(node_id, {}, n->Name(), true);
+    node2dot[n] = node_id;
+  }
+
+  // Create edges
+  for (const ir::Node* n : graph.Nodes()) {
+    const auto& src_id = node2dot.at(n);
+    for (auto* out : n->outputs) {
+      const auto& dest_id = node2dot.at(out);
+      dot.AddEdge(src_id, dest_id, {});
+    }
+  }
+
+  const std::string& viz_graph = dot.Build();
+  VLOG(1) << "The hash graph:\n" << viz_graph;
+
+  size_t hash_val = std::hash<std::string>()(viz_graph);
+  VLOG(4) << "The graph's hash value is: " << hash_val;
+  return hash_val;
+}
+
 void CinnCacheKey::SetKey(
     const ir::Graph& graph,
     const std::map<std::string, const LoDTensor*>& input_tensors,
     const std::string& arch_str) {
-  ProgramDesc program;
-  GraphToProgram(graph, &program);
-  program.Proto()->SerializeToString(&graph_serialize_str_);
+  graph_serialize_str_ = std::to_string(HashGraph(graph));
   for (const auto& name_tensor : input_tensors) {
     input_shapes_[name_tensor.first] = name_tensor.second->dims();
   }
@@ -55,9 +87,7 @@ void CinnCacheKey::SetKey(
 void CinnCacheKey::SetKey(const ir::Graph& graph,
                           const std::map<std::string, DDim>& input_shapes,
                           const std::string& arch_str) {
-  ProgramDesc program;
-  GraphToProgram(graph, &program);
-  program.Proto()->SerializeToString(&graph_serialize_str_);
+  graph_serialize_str_ = std::to_string(HashGraph(graph));
   input_shapes_ = input_shapes;
   arch_str_ = arch_str;
 }
diff --git a/paddle/fluid/framework/paddle2cinn/cinn_cache_key.h b/paddle/fluid/framework/paddle2cinn/cinn_cache_key.h
index 02b152a681c446fd6ffbbc82c5c675bc82297057..941f8e0cdecc188dde30cda12a6cb59ed48c097d 100644
--- a/paddle/fluid/framework/paddle2cinn/cinn_cache_key.h
+++ b/paddle/fluid/framework/paddle2cinn/cinn_cache_key.h
@@ -58,6 +58,8 @@ class CinnCacheKey {
   };
 
  private:
+  size_t HashGraph(const ir::Graph& graph);
+
   std::string graph_serialize_str_;
   std::map<std::string, DDim> input_shapes_;
   std::string arch_str_;
diff --git a/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc b/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc
index 561ebeedb30597c88238b3bf511faf8c605125ff..c870536362e913e3b6a239124915b115a98f89d8 100644
--- a/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc
+++ b/paddle/fluid/framework/paddle2cinn/cinn_compiler.cc
@@ -139,7 +139,7 @@ std::string CinnCompiler::VizGraph(const Graph& graph) const {
           node_id,
           {Dot::Attr("shape", "box"), Dot::Attr("style", "rounded,filled,bold"),
Dot::Attr("color", "#303A3A"), Dot::Attr("fontcolor", "#ffffff")}, - n->Name()); + n->Name(), true); } else if (n->IsVar()) { auto label = n->Name(); if (n->Var() && n->Var()->GetType() == proto::VarType::LOD_TENSOR) { @@ -155,7 +155,7 @@ std::string CinnCompiler::VizGraph(const Graph& graph) const { Dot::Attr("color", n->Var()->IsParameter() ? "#148b97" : "#dddddd"), Dot::Attr("fontcolor", n->Var()->IsParameter() ? "#ffffff" : "#000000")}, - label); + label, true); } node2dot[n] = node_id; } diff --git a/paddle/fluid/inference/analysis/dot.h b/paddle/fluid/inference/analysis/dot.h index 4693729cb43d7a9df96b11c4bf3064a70d1db4c3..6d883f558709b70df885ac8d6c5fba0f5474b3d5 100644 --- a/paddle/fluid/inference/analysis/dot.h +++ b/paddle/fluid/inference/analysis/dot.h @@ -59,6 +59,9 @@ class Dot { attrs(attrs), id_("node_" + std::to_string(dot_node_counter++)) {} + Node(const std::string& name, const std::vector& attrs, size_t id) + : name(name), attrs(attrs), id_("node_" + std::to_string(id)) {} + std::string id() const { return id_; } std::string repr() const { @@ -113,10 +116,14 @@ class Dot { explicit Dot(const std::vector& attrs) : attrs_(attrs) {} void AddNode(const std::string& id, const std::vector& attrs, - std::string label = "") { + std::string label = "", bool use_local_id = false) { CHECK(!nodes_.count(id)) << "duplicate Node '" << id << "'"; if (label.empty()) label = id; - nodes_.emplace(id, Node{label, attrs}); + if (use_local_id) { + nodes_.emplace(id, Node{label, attrs, local_node_counter_++}); + } else { + nodes_.emplace(id, Node{label, attrs}); + } } void AddEdge(const std::string& source, const std::string& target, @@ -154,6 +161,8 @@ class Dot { std::unordered_map nodes_; std::vector edges_; std::vector attrs_; + + size_t local_node_counter_{0}; }; } // namespace analysis diff --git a/paddle/fluid/operators/cinn_launch_op.cc b/paddle/fluid/operators/cinn_launch_op.cc index 51c5183241a42cf14870e6a069d63d2252d7c6c5..9e11884b8c1782c01f95735511b7ed0576d4fe62 100644 --- a/paddle/fluid/operators/cinn_launch_op.cc +++ b/paddle/fluid/operators/cinn_launch_op.cc @@ -98,13 +98,13 @@ CinnTensor CinnLaunchContext::GetCinnTensor(const std::string& var_name) { return cinn_scope_->GetTensor(var_name); } -std::vector CinnLaunchContext::GetInternalVariableNames() { +std::unordered_set CinnLaunchContext::GetInternalVariableNames() { std::unordered_set all_parameters(cinn_variable_names_); std::for_each(name2argument_.begin(), name2argument_.end(), [&all_parameters](const auto& name2arg) { all_parameters.erase(name2arg.first); }); - return {all_parameters.begin(), all_parameters.end()}; + return all_parameters; } void CinnLaunchContext::MutableTensorData(const std::string& var_name, diff --git a/paddle/fluid/operators/cinn_launch_op.h b/paddle/fluid/operators/cinn_launch_op.h index f7d1328bcef3fc26473b1f9bf3f629e827248885..99446d15aa208d76f126310f93bd73874f6ba113 100644 --- a/paddle/fluid/operators/cinn_launch_op.h +++ b/paddle/fluid/operators/cinn_launch_op.h @@ -62,7 +62,7 @@ class CinnLaunchContext { // Extract internal variable names from CinnScope // by excluding used input and output variables - std::vector GetInternalVariableNames(); + std::unordered_set GetInternalVariableNames(); // Finalize all execution arguments and return them const std::map& FinalizeArguments() const; diff --git a/paddle/fluid/operators/cinn_launch_op_test.cc b/paddle/fluid/operators/cinn_launch_op_test.cc index 783ca0f86fc37a38443d9e6c57ad98daf97ed5a0..5a07a49a5969aad40ee2b9638f6c3d005cb44fc3 100644 --- 
+++ b/paddle/fluid/operators/cinn_launch_op_test.cc
@@ -223,7 +223,7 @@ TEST(CinnLaunchContextTest, TestGetInternalVariableNames) {
       std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
   auto internal_variable_names = launch_context->GetInternalVariableNames();
   ASSERT_EQ(internal_variable_names.size(), 1);
-  EXPECT_EQ(internal_variable_names.front(), "cinn_var2");
+  EXPECT_EQ(*internal_variable_names.begin(), "cinn_var2");
 }
 
 TEST(CinnLaunchContextTest, TestMutableTensorData) {
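
The core change above replaces ProgramDesc serialization with CinnCacheKey::HashGraph, which renders the graph to a DOT string (via the new use_local_id path, so node ids restart from zero for every graph instead of advancing the process-wide dot_node_counter) and hashes that string with std::hash<std::string>. The snippet below is a minimal standalone sketch of the same idea; MiniGraph, HashMiniGraph, and the hand-rolled DOT formatting are illustrative stand-ins for ir::Graph and inference::analysis::Dot, not Paddle APIs.

// Standalone sketch only: MiniGraph, HashMiniGraph, and main() are hypothetical
// names used for illustration; the real implementation is CinnCacheKey::HashGraph.
#include <cstddef>
#include <functional>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// A tiny stand-in for ir::Graph: nodes carry a name, edges are index pairs.
struct MiniGraph {
  std::vector<std::string> node_names;
  std::vector<std::pair<int, int>> edges;  // (source index, destination index)
};

// Render the graph to a DOT-like string using sequential, graph-local node ids,
// then hash that string. Local ids keep the text independent of any global
// counter, so structurally identical graphs yield identical strings.
std::size_t HashMiniGraph(const MiniGraph& graph) {
  std::ostringstream dot;
  dot << "digraph G {\n";
  std::unordered_map<int, std::string> node2dot;
  int id = 0;
  for (std::size_t i = 0; i < graph.node_names.size(); ++i) {
    std::string node_id = "node_" + std::to_string(id++);
    dot << "  " << node_id << " [label=\"" << graph.node_names[i] << "\"]\n";
    node2dot[static_cast<int>(i)] = node_id;
  }
  for (const auto& edge : graph.edges) {
    dot << "  " << node2dot.at(edge.first) << " -> "
        << node2dot.at(edge.second) << "\n";
  }
  dot << "}\n";
  return std::hash<std::string>()(dot.str());
}

int main() {
  MiniGraph g{{"feed", "mul", "fetch"}, {{0, 1}, {1, 2}}};
  std::cout << "graph hash: " << HashMiniGraph(g) << "\n";
  return 0;
}

One caveat carried over from the patch's own comment in HashGraph: graph.Nodes() is backed by an unordered_set, so node enumeration order is not guaranteed to be stable, and two structurally identical graphs could serialize to different DOT text (and therefore different cache keys) unless the traversal order is made deterministic.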