Unverified commit bbd4bd73, authored by jiangcheng, committed by GitHub

add cinn graph symbolization (#36417)

* add cinn graph symbolization

* fix some bugs

* add paddle scope to cinn scope

* add paddle scope to CINN scope in Symbolization, and add feed op when building cinn pass

* fix some bugs

* fix some bugs following review advice

* optimize code

* revert build_cinn_pass and move the change to https://github.com/PaddlePaddle/Paddle/pull/36503

* fix some bugs after co-compilation

* improve unit test script

* remove scope and rename feed_target to input_tensor

* using std::unordered_map instead of absl::flat_hash_map

* fix unit test bug

* revert to previous version since WITH_CINN will be added in a later PR

* output full error information for CI

* output full enforce information to pass CI
Parent 99e396f8
paddle/fluid/framework/paddle2cinn/CMakeLists.txt
@@ -5,7 +5,10 @@ cc_library(build_cinn_pass SRCS build_cinn_pass.cc DEPS pass subgraph_detector)
if (WITH_CINN)
cc_library(transform_desc SRCS transform_desc.cc DEPS proto_desc cinn)
cc_library(cinn_graph_symbolization SRCS cinn_graph_symbolization.cc DEPS lod_tensor graph graph_helper transform_desc cinn)
cc_test(test_transform_desc SRCS transform_desc_test.cc DEPS transform_desc)
cc_test(test_cinn_graph_symbolization SRCS cinn_graph_symbolization_test.cc DEPS cinn_graph_symbolization)
endif()
cc_test(cinn_cache_key_test SRCS cinn_cache_key_test.cc DEPS cinn_cache_key)
paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h"
#include <algorithm>
#include <iterator>
#include <memory>
#include <queue>
#include <string>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/paddle2cinn/transform_desc.h"
#include "paddle/fluid/framework/variable.h"
#include "cinn/frontend/op_mappers/use_op_mappers.h"
#include "cinn/frontend/var_type_utils.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
using ir::Graph;
using ir::Node;
using CinnTensor = ::cinn::hlir::framework::Tensor;
using OpMapperContext = CinnGraphSymbolization::OpMapperContext;
using CinnOpDesc = CinnGraphSymbolization::CinnOpDesc;
using FeedInfoMap = CinnGraphSymbolization::FeedInfoMap;
namespace utils {
OpMapperContext::FeedInfo GetCinnFeedInfoFromTensor(const Tensor& tensor) {
OpMapperContext::FeedInfo info;
const auto& dim = tensor.dims();
for (int i = 0; i < dim.size(); i++) {
info.shape.emplace_back(static_cast<int>(dim[i]));
}
auto cinn_var_type = TransformVarDataTypeToCinn(tensor.type());
info.type = ::cinn::frontend::utils::CppVarType2CommonType(cinn_var_type);
return info;
}
} // namespace utils
FeedInfoMap CinnGraphSymbolization::GetFeedInfoMapFromInput() const {
FeedInfoMap feed_map;
for (auto& feed_pair : input_tensors_) {
const auto& feed_name = feed_pair.first;
const auto* tensor = feed_pair.second;
feed_map[feed_name] = utils::GetCinnFeedInfoFromTensor(*tensor);
}
return feed_map;
}
// get the name set of the graph ops' input parameter vars
std::unordered_set<std::string>
CinnGraphSymbolization::GetGraphInputParameterNames() const {
std::unordered_set<std::string> names;
for (auto* node : graph_.Nodes()) {
if (node->IsOp()) {
for (auto* var : node->inputs) {
if (var->Var()->IsParameter()) {
// Only the input parameter vars of the graph need to be
// preserved, others do not.
names.insert(var->Name());
}
}
}
}
return names;
}
// Create the CINN scope; note that we only preserve the graph's
// input parameter variables and ignore others.
std::shared_ptr<::cinn::hlir::framework::Scope>
CinnGraphSymbolization::CreateCinnScope(const FeedInfoMap& feed_map) const {
auto cinn_scope = ::cinn::hlir::framework::Scope::Create();
// get the graph's input parameter variable name list
auto parameter_names = GetGraphInputParameterNames();
for (const auto& param_name : parameter_names) {
VLOG(4) << "add param var [" << param_name << "] info scope";
// if cannot find var in graph input, skip.
// scope accepte the CINN format name, so here we need transform
// paddle format name to CINN format.
auto* cinn_var = cinn_scope->Var<CinnTensor>(
::cinn::utils::TransValidVarName(param_name));
auto& cinn_tensor = absl::get<CinnTensor>(*cinn_var);
// here we only need to preserve dtype and shape, not the data
auto feed_info = feed_map.at(param_name);
cinn_tensor->set_type(feed_info.type);
cinn_tensor->Resize(::cinn::hlir::framework::Shape(feed_info.shape));
}
return cinn_scope;
}
std::vector<std::unique_ptr<CinnOpDesc>>
CinnGraphSymbolization::TransformAllGraphOpToCinn() const {
std::vector<std::unique_ptr<CinnOpDesc>> cinn_op_descs;
const auto& sorted_ops = ir::TopologySortOperations(graph_);
for (auto* node : sorted_ops) {
cinn_op_descs.emplace_back(std::make_unique<CinnOpDesc>());
auto& cinn_desc = cinn_op_descs.back();
TransformOpDescToCinn(node->Op(), cinn_desc.get());
}
return cinn_op_descs;
}
void CinnGraphSymbolization::RunOp(const CinnOpDesc& op_desc,
const OpMapperContext& ctx) const {
const auto& op_type = op_desc.Type();
auto* kernel = ::cinn::frontend::OpMapperRegistry::Global()->Find(op_type);
PADDLE_ENFORCE_NE(kernel, nullptr,
platform::errors::NotFound(
"Op %s is Not Supported by CINN, please register"
" this op in the CINN repo.",
op_type.c_str()));
VLOG(4) << "Running Op " << op_type;
kernel->Run(op_desc, ctx);
}
void CinnGraphSymbolization::RunGraph(const OpMapperContext& ctx) const {
auto cinn_op_descs = TransformAllGraphOpToCinn();
// run the CINN ops one by one; note that all ops
// have already been topologically sorted in TransformAllGraphOpToCinn.
for (auto& op_desc : cinn_op_descs) {
RunOp(*op_desc, ctx);
}
}
::cinn::frontend::Program CinnGraphSymbolization::operator()() {
std::string builder_name = "NetBuilder_of_graph_" + std::to_string(graph_id_);
VLOG(4) << "NetBuilder Name " << builder_name;
::cinn::frontend::NetBuilder builder(builder_name);
auto feed_map = GetFeedInfoMapFromInput();
auto cinn_scope = CreateCinnScope(feed_map);
OpMapperContext ctx(*cinn_scope, target_, &builder, &var_map_,
&var_model_to_program_map_);
// add every tensor's feed info into the context
for (auto& feed_pair : feed_map) {
ctx.AddFeedInfo(feed_pair.first, feed_pair.second);
VLOG(4) << "add feed var [" << feed_pair.first << "] into context";
}
}
RunGraph(ctx);
return builder.Build();
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
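Note that GetFeedInfoMapFromInput above only reads each input tensor's dims and dtype, so callers must set both before symbolization. Below is a minimal sketch of preparing one such input, mirroring the unit test further down; MakeInputs is a hypothetical helper for illustration, not part of this commit.

```cpp
#include <map>
#include <string>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/platform/place.h"

// Hypothetical helper for illustration: prepare the input_tensors map that
// CinnGraphSymbolization consumes. Only the dims and dtype matter here;
// GetCinnFeedInfoFromTensor reads them and never touches the tensor data.
std::map<std::string, const paddle::framework::LoDTensor*> MakeInputs(
    paddle::framework::LoDTensor* x) {
  paddle::framework::DDim dims = {256, 1024};
  x->Resize(dims);
  x->mutable_data(paddle::platform::CPUPlace(),
                  paddle::framework::proto::VarType::FP32);
  return {{"var1", x}};
}
```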
paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "cinn/frontend/net_builder.h"
#include "cinn/frontend/op_mapper_registry.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
// An executor that accepts a subgraph generated by BuildCinnPass,
// runs each op's CINN Op Mapper, and finally returns a frontend::Program
// object corresponding to the subgraph.
//
// Parameters:
// 1. graph_id:
// the unique graph id, used for generating a unique NetBuilder name.
// 2. graph:
// the CINN subgraph whose ops are all supported by CINN, and which
// is independent of other graphs.
// 3. input_tensors:
// all input var nodes of the CINN subgraph; they are necessary because
// we need to pass each var's shape and data type into CINN, otherwise
// the NetBuilder may fail when a shape does not meet its preconditions.
//
// Description:
// The main function is operator(); it runs every op through its CINN
// OpMapper and finally returns a Program object.
// The executor operator() consists of the following steps:
// 1. create a NetBuilder whose name is unique for each graph;
// 2. create an OpMapperContext, containing the scope, target, local
// var_map and local var_model_to_program_map;
// 3. add all feed vars into the OpMapperContext to pass their shape and
// type into CINN;
// 4. topologically sort the graph's op nodes;
// 5. transform every op from paddle OpDesc format to cinn OpDesc format;
// 6. run the CINN ops in the graph one by one. Note that the graph has
// been topologically sorted;
// 7. return the result of NetBuilder.Build() after all ops have run.
class CinnGraphSymbolization {
public:
CinnGraphSymbolization(
int64_t graph_id, const ir::Graph& graph,
const ::cinn::common::Target& target,
const std::map<std::string, const LoDTensor*>& input_tensors)
: graph_id_(graph_id),
graph_(graph),
target_(target),
input_tensors_(input_tensors) {}
// run all CINN ops in the graph in topological order, then return the built Program
::cinn::frontend::Program operator()();
// return the internal variable map
const std::unordered_map<std::string, ::cinn::frontend::Variable>& var_map()
const {
return var_map_;
}
// return the map from variable names in the paddle model to those in the cinn program.
const std::unordered_map<std::string, std::string>& var_model_to_program_map()
const {
return var_model_to_program_map_;
}
using OpMapperContext = ::cinn::frontend::OpMapperContext;
using FeedInfoMap =
std::unordered_map<std::string, OpMapperContext::FeedInfo>;
using CinnOpDesc = ::cinn::frontend::paddle::cpp::OpDesc;
private:
const int64_t graph_id_;
const ir::Graph& graph_;
const ::cinn::common::Target& target_;
const std::map<std::string, const LoDTensor*>& input_tensors_;
// preserve local variable map
std::unordered_map<std::string, ::cinn::frontend::Variable> var_map_;
std::unordered_map<std::string, std::string> var_model_to_program_map_;
// collect the feed info (shape and type) of every input tensor into a FeedInfoMap
FeedInfoMap GetFeedInfoMapFromInput() const;
// transform all paddle op descs in the graph into cinn op descs
std::vector<std::unique_ptr<CinnOpDesc>> TransformAllGraphOpToCinn() const;
// RunOp accepts an OpDesc and the global run context, then runs
// its kernel registered in the OpMapper.
// Called by RunGraph.
void RunOp(const CinnOpDesc& op_desc, const OpMapperContext& ctx) const;
// transform all graph ops, then run them one by one.
void RunGraph(const OpMapperContext& ctx) const;
// create the cinn scope and add the parameters' feed info into it
std::shared_ptr<::cinn::hlir::framework::Scope> CreateCinnScope(
const FeedInfoMap& feed_map) const;
// get the name set of the graph ops' persistable (parameter) input vars
std::unordered_set<std::string> GetGraphInputParameterNames() const;
friend class CinnGraphSymbolizationForTest;
};
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
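The intended call pattern of the class above is small. Here is a minimal usage sketch against the same API, where SymbolizeSubgraph is a hypothetical wrapper (not part of this commit) and the graph, target, and input tensors are assumed to be prepared by the caller, e.g. as the unit test below does.

```cpp
#include <map>
#include <string>

#include "paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h"

namespace paddle {
namespace framework {
namespace paddle2cinn {

// Hypothetical wrapper for illustration only.
::cinn::frontend::Program SymbolizeSubgraph(
    int64_t graph_id, const ir::Graph& graph,
    const ::cinn::common::Target& target,
    const std::map<std::string, const LoDTensor*>& input_tensors) {
  CinnGraphSymbolization symbol(graph_id, graph, target, input_tensors);
  // operator() creates the NetBuilder, registers every feed info, runs
  // each op mapper in topological order, and returns the built Program.
  auto program = symbol();
  // The name maps are available afterwards, e.g. for binding inputs:
  // symbol.var_map(), symbol.var_model_to_program_map().
  return program;
}

}  // namespace paddle2cinn
}  // namespace framework
}  // namespace paddle
```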
paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization_test.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "gtest/gtest.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_graph_symbolization.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
using ir::Graph;
using ir::Node;
using ::cinn::frontend::NetBuilder;
using CinnTensor = ::cinn::hlir::framework::Tensor;
using OpMapperContext = CinnGraphSymbolization::OpMapperContext;
using CinnOpDesc = CinnGraphSymbolization::CinnOpDesc;
using FeedInfoMap = CinnGraphSymbolization::FeedInfoMap;
// only used for testing the CinnGraphSymbolization class
class CinnGraphSymbolizationForTest {
public:
explicit CinnGraphSymbolizationForTest(CinnGraphSymbolization* cinn_symbol)
: cinn_symbol_(cinn_symbol) {}
std::unordered_set<std::string> GetGraphInputParameterNames() {
return cinn_symbol_->GetGraphInputParameterNames();
}
std::shared_ptr<::cinn::hlir::framework::Scope> CreateCinnScope(
const FeedInfoMap& feed_map) {
return cinn_symbol_->CreateCinnScope(feed_map);
}
OpMapperContext CreateNewContext(NetBuilder* builder,
const FeedInfoMap& feed_map) {
return OpMapperContext(*cinn_symbol_->CreateCinnScope(feed_map),
cinn_symbol_->target_, builder,
&cinn_symbol_->var_map_,
&cinn_symbol_->var_model_to_program_map_);
}
FeedInfoMap GetFeedInfoMapFromInput() {
return cinn_symbol_->GetFeedInfoMapFromInput();
}
std::vector<std::unique_ptr<CinnOpDesc>> TransformAllGraphOpToCinn() {
return cinn_symbol_->TransformAllGraphOpToCinn();
}
void RunOp(const CinnOpDesc& op_desc, const OpMapperContext& ctx) {
cinn_symbol_->RunOp(op_desc, ctx);
}
private:
CinnGraphSymbolization* cinn_symbol_;
};
class CinnGraphSymbolizationTest : public ::testing::Test {
public:
CinnGraphSymbolizationTest() {
int64_t graph_id = 100;
graph_ = BuildAllOpSupportCinnGraph();
target_ = CreateDefaultTarget();
feed_tensors_ = CreateFeedTensors();
feed_targets_ = ConvertFeedType(feed_tensors_);
symbol_ = std::make_unique<CinnGraphSymbolization>(graph_id, *graph_,
target_, feed_targets_);
builder_ = std::make_unique<NetBuilder>("NetBuilder_of_graph_" +
std::to_string(graph_id));
test_ = std::make_unique<CinnGraphSymbolizationForTest>(symbol_.get());
feed_map_ = test_->GetFeedInfoMapFromInput();
}
std::unique_ptr<CinnGraphSymbolization> symbol_;
std::unique_ptr<CinnGraphSymbolizationForTest> test_;
std::map<std::string, const LoDTensor*> feed_targets_;
OpMapperContext CreateNewContext() {
return test_->CreateNewContext(builder_.get(), feed_map_);
}
std::shared_ptr<::cinn::hlir::framework::Scope> CreateCinnScope() {
return test_->CreateCinnScope(feed_map_);
}
private:
std::unique_ptr<Graph> graph_;
::cinn::common::Target target_;
std::map<std::string, LoDTensor> feed_tensors_;
std::unique_ptr<NetBuilder> builder_;
FeedInfoMap feed_map_;
std::unique_ptr<Graph> BuildAllOpSupportCinnGraph() {
ProgramDesc prog;
auto g = std::make_unique<Graph>(prog);
// v1 --
//      | --> mul --> v3 --
// v2 --                   | --> add --> v5 --> relu --> v6
//                    v4 --
OpDesc add_op;
add_op.SetType("add");
add_op.SetInput("X", {"var3"});
add_op.SetInput("Y", {"var4"});
add_op.SetOutput("Out", {"var5"});
OpDesc mul_op;
mul_op.SetType("mul");
mul_op.SetInput("X", {"var1"});
mul_op.SetInput("Y", {"var2"});
mul_op.SetOutput("Out", {"var3"});
OpDesc relu_op;
relu_op.SetType("relu");
relu_op.SetInput("X", {"var5"});
relu_op.SetOutput("Out", {"var6"});
OpDesc feed_var1;
feed_var1.SetType("feed");
feed_var1.SetOutput("Out", {"var1"});
OpDesc feed_var4;
feed_var4.SetType("feed");
feed_var4.SetOutput("Out", {"var4"});
VarDesc var1("var1");
VarDesc var2("var2");
var2.SetPersistable(true);
var2.SetIsParameter(true);
VarDesc var3("var3");
VarDesc var4("var4");
VarDesc var5("var5");
VarDesc var6("var6");
ir::Node* add = g->CreateOpNode(&add_op);
ir::Node* mul = g->CreateOpNode(&mul_op);
ir::Node* relu = g->CreateOpNode(&relu_op);
ir::Node* feed1 = g->CreateOpNode(&feed_var1);
ir::Node* feed4 = g->CreateOpNode(&feed_var4);
ir::Node* v1 = g->CreateVarNode(&var1);
ir::Node* v2 = g->CreateVarNode(&var2);
ir::Node* v3 = g->CreateVarNode(&var3);
ir::Node* v4 = g->CreateVarNode(&var4);
ir::Node* v5 = g->CreateVarNode(&var5);
ir::Node* v6 = g->CreateVarNode(&var6);
// fill op node
feed1->outputs = {v1};
feed4->outputs = {v4};
mul->inputs = {v1, v2};
mul->outputs = {v3};
add->inputs = {v3, v4};
add->outputs = {v5};
relu->inputs = {v5};
relu->outputs = {v6};
// fill variable node
v1->inputs = {feed1};
v1->outputs = {mul};
v2->outputs = {mul};
v3->inputs = {mul};
v3->outputs = {add};
v4->inputs = {feed4};
v4->outputs = {add};
v5->inputs = {add};
v5->outputs = {relu};
v6->inputs = {relu};
return g;
}
::cinn::common::Target CreateDefaultTarget(bool use_gpu = false) {
#ifdef PADDLE_WITH_CUDA
if (use_gpu) {
return ::cinn::common::DefaultNVGPUTarget();
}
#endif
return ::cinn::common::DefaultHostTarget();
}
std::map<std::string, LoDTensor> CreateFeedTensors() {
std::map<std::string, LoDTensor> feed_targets;
auto create_tensor = []() {
LoDTensor tensor;
DDim dims = {256, 1024};
tensor.Resize(dims);
tensor.mutable_data(platform::CPUPlace(), proto::VarType::FP32);
return tensor;
};
#define FillFeedList(Name) feed_targets[#Name] = create_tensor();
FillFeedList(var1);
FillFeedList(var2);
FillFeedList(var3);
FillFeedList(var4);
FillFeedList(var5);
FillFeedList(var6);
#undef FillFeedList
DDim y_dim = {1024, 1024};
feed_targets["var2"].Resize(y_dim);
return feed_targets;
}
std::map<std::string, const LoDTensor*> ConvertFeedType(
const std::map<std::string, LoDTensor>& feed_targets) {
std::map<std::string, const LoDTensor*> res;
for (auto& feed_pair : feed_targets) {
res[feed_pair.first] = &feed_pair.second;
}
return res;
}
};
TEST_F(CinnGraphSymbolizationTest, feed_map) {
auto feed_map = test_->GetFeedInfoMapFromInput();
auto ctx = CreateNewContext();
ASSERT_TRUE(feed_map.count("var1"));
ASSERT_TRUE(feed_map.count("var2"));
auto feed_info = feed_map.at("var1");
ASSERT_EQ(feed_info.shape, std::vector<int>({256, 1024}));
ASSERT_EQ(feed_info.type, ::cinn::common::F32());
}
TEST_F(CinnGraphSymbolizationTest, scope) {
auto param_names = test_->GetGraphInputParameterNames();
ASSERT_EQ(param_names, std::unordered_set<std::string>({"var2"}));
auto cinn_scope = CreateCinnScope();
auto* var1 = cinn_scope->FindVar("var1");
ASSERT_EQ(var1, nullptr);
auto* var2 = cinn_scope->FindVar("var2");
ASSERT_NE(var2, nullptr);
auto& cinn_tensor = absl::get<CinnTensor>(*var2);
ASSERT_EQ(cinn_tensor->shape().data(), std::vector<int>({1024, 1024}));
ASSERT_EQ(cinn_tensor->type(), ::cinn::common::F32());
}
TEST_F(CinnGraphSymbolizationTest, sortgraph) {
auto cinn_op_descs = test_->TransformAllGraphOpToCinn();
ASSERT_FALSE(cinn_op_descs.empty());
std::vector<std::string> sort_names;
for (auto& desc : cinn_op_descs) {
sort_names.emplace_back(desc->Type());
}
ASSERT_EQ(sort_names,
std::vector<std::string>({"feed", "mul", "feed", "add", "relu"}));
}
TEST_F(CinnGraphSymbolizationTest, runop) {
auto cinn_op_descs = test_->TransformAllGraphOpToCinn();
auto feed_map = test_->GetFeedInfoMapFromInput();
auto ctx = CreateNewContext();
// add every tensor's feed info into the context
for (auto& feed_pair : feed_map) {
ctx.AddFeedInfo(feed_pair.first, feed_pair.second);
}
ASSERT_NO_THROW(test_->RunOp(*cinn_op_descs[0], ctx));
CinnOpDesc desc;
desc.SetType("fake");
ASSERT_ANY_THROW(test_->RunOp(desc, ctx));
}
TEST_F(CinnGraphSymbolizationTest, basic) {
ASSERT_NO_THROW((*symbol_)());
ASSERT_FALSE(symbol_->var_map().empty());
ASSERT_FALSE(symbol_->var_model_to_program_map().empty());
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle