Unverified commit 6f18b041, authored by Huihuang Zheng, committed by GitHub

Add Basic CINN Runner Class (#35978)

* Add Basic CINN Runner Class

* Add CinnCacheKey

* Add Cache logic and improve CinnCacheKey


* Modify as reviewer commented

* Implement hash_combine to fix MAC build.
Parent 74ff59cf
......@@ -26,6 +26,7 @@ add_subdirectory(details)
add_subdirectory(fleet)
add_subdirectory(io)
add_subdirectory(new_executor)
add_subdirectory(paddle2cinn)
#ddim lib
proto_library(framework_proto SRCS framework.proto)
proto_library(pass_desc_proto SRCS pass_desc.proto DEPS framework_proto)
......
cc_library(cinn_cache_key SRCS cinn_cache_key.cc DEPS boost graph graph_helper lod_tensor proto_desc)
cc_library(cinn_compiled_object SRCS cinn_compiled_object.cc DEPS feed_fetch_method graph lod_tensor proto_desc)
cc_library(cinn_runner SRCS cinn_runner.cc DEPS cinn_cache_key cinn_compiled_object feed_fetch_method graph lod_tensor scope)
cc_test(cinn_cache_key_test SRCS cinn_cache_key_test.cc DEPS cinn_cache_key)
cc_test(cinn_runner_test SRCS cinn_runner_test.cc DEPS cinn_runner proto_desc)
cc_test(cinn_compiled_object_test SRCS cinn_compiled_object_test.cc DEPS cinn_compiled_object)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/paddle2cinn/cinn_cache_key.h"
#include <map>
#include <string>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/lod_tensor.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
CinnCacheKey::CinnCacheKey(
const ir::Graph& graph,
const std::map<std::string, const LoDTensor*>& feed_tensors) {
this->SetKey(graph, feed_tensors);
}
CinnCacheKey::CinnCacheKey(const ir::Graph& graph,
const std::map<std::string, DDim>& feed_shapes) {
this->SetKey(graph, feed_shapes);
}
void CinnCacheKey::SetKey(
const ir::Graph& graph,
const std::map<std::string, const LoDTensor*>& feed_tensors) {
ProgramDesc program;
GraphToProgram(graph, &program);
program.Proto()->SerializeToString(&graph_serialize_str_);
for (const auto& name_tensor : feed_tensors) {
feed_shapes_[name_tensor.first] = name_tensor.second->dims();
}
}
void CinnCacheKey::SetKey(const ir::Graph& graph,
const std::map<std::string, DDim>& feed_shapes) {
ProgramDesc program;
GraphToProgram(graph, &program);
program.Proto()->SerializeToString(&graph_serialize_str_);
feed_shapes_ = feed_shapes;
}
bool CinnCacheKey::operator!=(const CinnCacheKey& other) const {
return !this->operator==(other);
}
bool CinnCacheKey::operator==(const CinnCacheKey& other) const {
return graph_serialize_str_ == other.graph_serialize_str_ &&
feed_shapes_ == other.feed_shapes_;
}
size_t CinnCacheKey::Hash::hash_combine(size_t seed, size_t value) {
return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}
size_t CinnCacheKey::Hash::operator()(const CinnCacheKey& key) const {
std::size_t ret = 0;
std::hash<std::string> string_hasher;
for (const auto& name_shape : key.feed_shapes_) {
ret = hash_combine(ret, string_hasher(name_shape.first));
ret = hash_combine(ret, string_hasher(name_shape.second.to_str()));
}
ret = hash_combine(ret, string_hasher(key.graph_serialize_str_));
return ret;
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
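The hash above uses a boost-style hash_combine step (the commit message notes it was hand-rolled to fix the Mac build). Below is a minimal, self-contained sketch of the same mixing step; the feed name and shape strings are illustrative values, not taken from the commit.

#include <cstddef>
#include <functional>
#include <string>

// Same mixing step as CinnCacheKey::Hash::hash_combine: folds each new value
// into the running seed, so both the values and their order affect the result.
static std::size_t HashCombine(std::size_t seed, std::size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

int main() {
  std::hash<std::string> string_hasher;

  std::size_t key_a = 0;
  key_a = HashCombine(key_a, string_hasher("X"));        // feed name
  key_a = HashCombine(key_a, string_hasher("1, 2, 3"));  // shape rendered as a string

  std::size_t key_b = 0;
  key_b = HashCombine(key_b, string_hasher("X"));
  key_b = HashCombine(key_b, string_hasher("1, 2, 4"));  // different shape

  // key_a and key_b almost certainly differ, so the corresponding cache keys
  // would land in different buckets of an unordered container.
  return key_a == key_b ? 1 : 0;
}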
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
// Class to store the keys for compiling CINN.
//
// CINN cannot handle changeable shapes yet, so CinnRunner keeps a cache
// mapping from CinnCacheKey to CinnCompiledObject.
//
// The CinnCacheKey contains a serialized graph string and the fed tensor
// shapes.
class CinnCacheKey {
public:
CinnCacheKey(const ir::Graph& graph,
const std::map<std::string, const LoDTensor*>& feed_tensors);
CinnCacheKey(const ir::Graph& graph,
const std::map<std::string, DDim>& feed_shapes);
~CinnCacheKey() {}
void SetKey(const ir::Graph& graph,
const std::map<std::string, const LoDTensor*>& feed_tensors);
void SetKey(const ir::Graph& graph,
const std::map<std::string, DDim>& feed_shapes);
bool operator==(const CinnCacheKey& other) const;
bool operator!=(const CinnCacheKey& other) const;
struct Hash {
static size_t hash_combine(size_t seed, size_t value);
size_t operator()(const CinnCacheKey& key) const;
};
private:
std::string graph_serialize_str_;
std::map<std::string, DDim> feed_shapes_;
};
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <map>
#include <unordered_set>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_cache_key.h"
#include "paddle/fluid/framework/program_desc.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
TEST(CinnCacheKeyTest, TestAsUnorderedKey) {
std::unordered_set<CinnCacheKey, CinnCacheKey::Hash> test_set;
ProgramDesc empty_program;
ir::Graph empty_graph(empty_program);
ProgramDesc program;
auto *global_block = program.MutableBlock(0);
auto *x = global_block->Var("X");
x->SetType(proto::VarType::LOD_TENSOR);
ir::Graph graph(program);
LoDTensor tensor;
tensor.Resize({1, 2, 3});
const LoDTensor *tensor_pointer = &tensor;
std::map<std::string, const LoDTensor *> feed_tensors = {
{"X", tensor_pointer}};
DDim ddim = paddle::framework::make_ddim({1, 2, 3});
std::map<std::string, DDim> feed_shapes = {{"X", ddim}};
CinnCacheKey cache_key1(empty_graph, feed_tensors);
CinnCacheKey cache_key2(empty_graph, feed_shapes);
EXPECT_EQ(cache_key1, cache_key2);
CinnCacheKey cache_key3(graph, feed_shapes);
CinnCacheKey cache_key4(graph, feed_tensors);
EXPECT_EQ(cache_key3, cache_key4);
CinnCacheKey cache_key5(empty_graph,
std::map<std::string, const LoDTensor *>());
CinnCacheKey cache_key6(empty_graph, std::map<std::string, DDim>());
EXPECT_EQ(cache_key5, cache_key6);
EXPECT_NE(cache_key1, cache_key3);
EXPECT_NE(cache_key4, cache_key2);
EXPECT_NE(cache_key3, cache_key5);
EXPECT_NE(cache_key6, cache_key4);
EXPECT_NE(cache_key5, cache_key1);
EXPECT_NE(cache_key2, cache_key6);
test_set.insert(cache_key1);
test_set.insert(cache_key2);
test_set.insert(cache_key3);
test_set.insert(cache_key4);
test_set.insert(cache_key5);
test_set.insert(cache_key6);
EXPECT_EQ(test_set.size(), 3U);
auto iter = test_set.find(cache_key1);
EXPECT_NE(iter, test_set.end());
test_set.erase(iter);
EXPECT_EQ(test_set.size(), 2U);
EXPECT_EQ(test_set.find(cache_key2), test_set.end());
iter = test_set.find(cache_key3);
EXPECT_NE(iter, test_set.end());
test_set.erase(iter);
EXPECT_EQ(test_set.size(), 1U);
EXPECT_EQ(test_set.find(cache_key4), test_set.end());
iter = test_set.find(cache_key5);
EXPECT_NE(iter, test_set.end());
test_set.erase(iter);
EXPECT_EQ(test_set.size(), 0U);
EXPECT_EQ(test_set.find(cache_key6), test_set.end());
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
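A hypothetical extra case, written in the same style as the test above (and living in the same namespaces), illustrating the point made in cinn_cache_key.h: the same graph with different feed shapes must yield different cache keys, which is what forces a recompile when input shapes change.

TEST(CinnCacheKeyTest, DifferentShapesGiveDifferentKeys) {
  ProgramDesc program;
  ir::Graph graph(program);

  std::map<std::string, DDim> shapes_a = {
      {"X", paddle::framework::make_ddim({1, 2, 3})}};
  std::map<std::string, DDim> shapes_b = {
      {"X", paddle::framework::make_ddim({4, 5, 6})}};

  // Same serialized graph, different feed shapes -> unequal keys.
  CinnCacheKey key_a(graph, shapes_a);
  CinnCacheKey key_b(graph, shapes_b);
  EXPECT_NE(key_a, key_b);
}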
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/paddle2cinn/cinn_compiled_object.h"
#include <map>
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
CinnCompiledObject::CinnCompiledObject() {
// TODO(zhhsplendid): complete this function after CINN interface is ready
}
CinnCompiledObject::~CinnCompiledObject() {
// TODO(zhhsplendid): complete this function after CINN interface is ready
}
void CinnCompiledObject::Compile(
const ir::Graph& graph,
std::map<std::string, const LoDTensor*>* feed_targets) {
// TODO(zhhsplendid): complete this function after CINN interface is ready
}
std::map<std::string, FetchType*> CinnCompiledObject::Run(
Scope* scope, std::map<std::string, const LoDTensor*>* feed_targets) {
// TODO(zhhsplendid): complete this function after CINN interface is ready
return std::map<std::string, FetchType*>();
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
// Class to store and run a CINN compiled object
class CinnCompiledObject {
public:
CinnCompiledObject();
~CinnCompiledObject();
// Compiles the graph with CINN. CINN compilation needs the model graph,
// input names, and input shapes.
void Compile(const ir::Graph& graph,
std::map<std::string, const LoDTensor*>* feed_targets);
// Feeds LoDTensors to run the CINN compiled object and returns the
// fetched results
std::map<std::string, FetchType*> Run(
Scope* scope, std::map<std::string, const LoDTensor*>* feed_targets);
// Converts compiled object to Paddle Graph
// To be discussed
// ir::Graph ToGraph();
};
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <map>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiled_object.h"
#include "paddle/fluid/framework/program_desc.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
TEST(CinnCompiledObjectTest, TodoTest) {
ProgramDesc empty_program;
ir::Graph empty_graph(empty_program);
std::map<std::string, const LoDTensor*> empty_feed;
Scope empty_scope;
CinnCompiledObject compiled_obj;
compiled_obj.Compile(empty_graph, &empty_feed);
auto fetch = compiled_obj.Run(&empty_scope, &empty_feed);
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/paddle2cinn/cinn_runner.h"
#include <map>
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
using ir::Graph;
std::map<std::string, FetchType*> CinnRunner::Run(
const Graph& graph, Scope* scope,
std::map<std::string, const LoDTensor*>* feed_targets) {
CinnCacheKey cur_key(graph, *feed_targets);
std::shared_ptr<CinnCompiledObject> obj_to_run;
if (cache_.find(cur_key) != cache_.end()) {
obj_to_run = cache_[cur_key];
} else {
obj_to_run = std::make_shared<CinnCompiledObject>();
obj_to_run->Compile(graph, feed_targets);
cache_[cur_key] = obj_to_run;
}
return obj_to_run->Run(scope, feed_targets);
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <map>
#include <memory>
#include <unordered_map>
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_cache_key.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiled_object.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
// Entry point for running CINN.
//
// CINN cannot handle changeable shapes yet, so CinnRunner keeps a cache
// mapping from CinnCacheKey to CinnCompiledObject. On a cache hit, the
// stored CinnCompiledObject is reused; otherwise the graph is compiled
// again and the result is added to the cache.
class CinnRunner {
public:
CinnRunner() {}
~CinnRunner() {}
// Feeds LoDTensors to run the CINN compiled object and returns the
// fetched results
std::map<std::string, FetchType*> Run(
const ir::Graph& graph, Scope* scope,
std::map<std::string, const LoDTensor*>* feed_targets);
private:
std::unordered_map<CinnCacheKey, std::shared_ptr<CinnCompiledObject>,
CinnCacheKey::Hash>
cache_;
};
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "gtest/gtest.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_runner.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
namespace paddle {
namespace framework {
namespace paddle2cinn {
using ir::Graph;
TEST(CinnRunnerTest, TodoTest) {
ProgramDesc empty_program;
Graph empty_graph(empty_program);
Scope empty_scope;
std::map<std::string, const LoDTensor*> empty_feed;
CinnRunner cinn_runner;
cinn_runner.Run(empty_graph, &empty_scope, &empty_feed);
}
} // namespace paddle2cinn
} // namespace framework
} // namespace paddle
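For reference, a hypothetical sketch of the caching behavior described in cinn_runner.h, in the same style as the test above (it would sit alongside it inside the same namespaces). Since Compile and Run are still TODO stubs in this commit, it only exercises the cache path: the second Run with the same graph and feed shapes should hit the cache and reuse the stored CinnCompiledObject instead of compiling again.

TEST(CinnRunnerTest, CacheReuseSketch) {
  ProgramDesc program;
  Graph graph(program);
  Scope scope;

  LoDTensor x;
  x.Resize({1, 2, 3});
  std::map<std::string, const LoDTensor *> feed_targets = {{"X", &x}};

  CinnRunner runner;
  runner.Run(graph, &scope, &feed_targets);  // cache miss: compile and store
  runner.Run(graph, &scope, &feed_targets);  // cache hit: reuse compiled object
}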