Unverified · Commit ede0990f · Authored by WangZhen, committed by GitHub

[JitLayer] Rename Function to Engine and use the new Function class to wrap Engine (#44900)

* Polish function code

* Rename function to engine

* Fix log message and docs

* Rename Function to Engine and use the new Function class to wrap Engine

* Rename EngineInfo

* Adjust member variable order
Parent 80cc4f0d
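In short, callers no longer receive a std::shared_ptr<BaseFunction> from Layer::Function; they get a lightweight jit::Function value that forwards calls to a BaseEngine owned by the layer's CompilationUnit. A minimal sketch of the new call path (the jit::Load call and model path are illustrative, assuming the Paddle source tree):

    // Sketch only, not part of this commit: user code after the rename.
    #include "paddle/fluid/jit/layer.h"
    #include "paddle/fluid/jit/serializer.h"

    void RunForward(const std::vector<paddle::experimental::Tensor>& inputs) {
      auto layer = paddle::jit::Load("/path/to/model", phi::CPUPlace());
      // Function is now a value type wrapping a BaseEngine* internally.
      paddle::jit::Function forward = layer.Function("forward");
      auto outs = forward(inputs);  // previously: auto func = ...; (*func)(inputs)
    }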
-add_subdirectory(function)
+add_subdirectory(engine)
 proto_library(paddle_jit_property_proto SRCS property.proto)
 cc_library(
@@ -31,6 +31,11 @@ cc_library(
   SRCS function_schema.cc
   DEPS jit_function_utils)
+cc_library(
+  jit_function
+  SRCS function.cc
+  DEPS jit_function_utils jit_executor_engine jit_pe_engine)
 cc_library(
   jit_layer
   SRCS layer.cc
@@ -39,8 +44,9 @@ cc_library(
   jit_serializer_utils
   jit_compilation_unit
   jit_function_schema
-  jit_executor_function
-  jit_pe_function)
+  jit_executor_engine
+  jit_pe_engine
+  jit_function)
 if(WITH_TESTING AND NOT WIN32)
   add_custom_target(
......
@@ -14,7 +14,7 @@
 #pragma once
-#include "function/base_function.h"  // NOLINT
+#include "function.h"          // NOLINT
 #include "layer.h"             // NOLINT
 #include "serializer.h"        // NOLINT
 #include "serializer_utils.h"  // NOLINT
@@ -16,37 +16,27 @@
 #include "paddle/phi/core/enforce.h"
-#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
 namespace paddle {
 namespace jit {
-std::shared_ptr<BaseFunction> CompilationUnit::Function(
+std::shared_ptr<BaseEngine> CompilationUnit::GetEngine(
     const std::string &name) const {
   PADDLE_ENFORCE_EQ(
-      function_map_.count(name),
+      engine_map_.count(name),
       1,
       phi::errors::InvalidArgument(
-          "Function named %s does not exist in function_map_.", name));
-  return function_map_.at(name);
+          "Function named %s does not exist in engine_map_.", name));
+  return engine_map_.at(name);
 }
-void CompilationUnit::SetFunction(
-    const std::string &name, const std::shared_ptr<BaseFunction> &function) {
-  function_map_[name] = function;
+void CompilationUnit::SetEngine(const std::string &name,
+                                const std::shared_ptr<BaseEngine> &engine) {
+  engine_map_[name] = engine;
 }
-std::vector<std::string> CompilationUnit::FunctionNames() const {
-  std::vector<std::string> names;
-  for (auto it = function_map_.begin(); it != function_map_.end(); it++) {
-    names.emplace_back(it->first);
-  }
-  return names;
-}
-const Name2FunctionMap &CompilationUnit::FunctionMap() const {
-  return function_map_;
-}
+const Name2EngineMap &CompilationUnit::EngineMap() const { return engine_map_; }
 }  // namespace jit
 }  // namespace paddle
@@ -21,26 +21,24 @@
 namespace paddle {
 namespace jit {
-class BaseFunction;
-using Name2FunctionMap =
-    std::unordered_map<std::string, std::shared_ptr<BaseFunction>>;
+class BaseEngine;
+using Name2EngineMap =
+    std::unordered_map<std::string, std::shared_ptr<BaseEngine>>;
 class CompilationUnit {
  public:
   CompilationUnit() = default;
   ~CompilationUnit() {}
-  std::shared_ptr<BaseFunction> Function(const std::string &name) const;
+  std::shared_ptr<BaseEngine> GetEngine(const std::string &name) const;
-  void SetFunction(const std::string &name,
-                   const std::shared_ptr<BaseFunction> &function);
+  void SetEngine(const std::string &name,
+                 const std::shared_ptr<BaseEngine> &engine);
-  std::vector<std::string> FunctionNames() const;
-  const Name2FunctionMap &FunctionMap() const;
+  const Name2EngineMap &EngineMap() const;
  private:
-  Name2FunctionMap function_map_;
+  Name2EngineMap engine_map_;
 };
 }  // namespace jit
......
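After this change the compilation unit is a plain name-to-engine registry. A hedged sketch of the new API surface (`my_engine` stands in for any std::shared_ptr<BaseEngine>; it is not defined in this PR):

    // Sketch only: exercising the renamed CompilationUnit API.
    paddle::jit::CompilationUnit unit;
    unit.SetEngine("forward", my_engine);
    auto engine = unit.GetEngine("forward");  // enforces that the name exists
    const auto& engines = unit.EngineMap();   // replaces FunctionNames()/FunctionMap()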
 cc_library(
-  jit_executor_function
-  SRCS executor_function.cc
+  jit_executor_engine
+  SRCS executor_engine.cc
   DEPS executor)
 cc_library(
-  jit_pe_function
-  SRCS pe_function.cc
+  jit_pe_engine
+  SRCS pe_engine.cc
   DEPS parallel_executor)
@@ -22,14 +22,14 @@ namespace jit {
 using Tensor = paddle::experimental::Tensor;
 using DenseTensor = phi::DenseTensor;
-class BaseFunction {
+class BaseEngine {
  public:
   virtual std::vector<DenseTensor> operator()(
       const std::vector<DenseTensor> &inputs) = 0;
   virtual std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) = 0;
-  virtual ~BaseFunction() {}
+  virtual ~BaseEngine() {}
 };
 }  // namespace jit
......
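BaseEngine keeps the same pure-virtual call operators the old BaseFunction had, so an engine only needs to implement the two overloads. A minimal hypothetical subclass (IdentityEngine is not part of this PR):

    // Sketch only: a BaseEngine that returns its inputs unchanged.
    class IdentityEngine : public paddle::jit::BaseEngine {
     public:
      std::vector<phi::DenseTensor> operator()(
          const std::vector<phi::DenseTensor>& inputs) override {
        return inputs;
      }
      std::vector<paddle::experimental::Tensor> operator()(
          const std::vector<paddle::experimental::Tensor>& inputs) override {
        return inputs;
      }
    };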
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/engine/executor_engine.h"
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/variable.h"
@@ -21,9 +21,9 @@
 namespace paddle {
 namespace jit {
-ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
-                                   const Name2VariableMap &params_dict,
-                                   const phi::Place &place)
+ExecutorEngine::ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
+                               const Name2VariableMap &params_dict,
+                               const phi::Place &place)
     : info_(info), place_(place), inner_exe_(place_) {
   info_->RemoveDescFeedFetch();
   PADDLE_ENFORCE_GT(
@@ -35,13 +35,13 @@ ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
   VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
 }
-std::vector<Tensor> ExecutorFunction::operator()(
+std::vector<Tensor> ExecutorEngine::operator()(
     const std::vector<Tensor> &inputs) {
   auto dense_tensors = utils::ToDenseTensors(inputs);
   return utils::ToTensors(this->operator()(dense_tensors));
 }
-std::vector<DenseTensor> ExecutorFunction::operator()(
+std::vector<DenseTensor> ExecutorEngine::operator()(
     const std::vector<DenseTensor> &inputs) {
   utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
   inner_exe_.Run(info_->ProgramDesc(),
@@ -55,7 +55,7 @@ std::vector<DenseTensor> ExecutorFunction::operator()(
   return outputs;
 }
-const std::shared_ptr<FunctionInfo> &ExecutorFunction::Info() const {
+const std::shared_ptr<FunctionInfo> &ExecutorEngine::Info() const {
   return info_;
 }
......
@@ -19,20 +19,20 @@
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/function_utils.h"
 namespace paddle {
 namespace jit {
-class ExecutorFunction : public BaseFunction {
+class ExecutorEngine : public BaseEngine {
  public:
-  ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
-                   const Name2VariableMap &params_dict,
-                   const phi::Place &place);
+  ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
+                 const Name2VariableMap &params_dict,
+                 const phi::Place &place);
-  ~ExecutorFunction() noexcept {}
+  ~ExecutorEngine() noexcept {}
   std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
......
@@ -12,10 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/jit/function/pe_function.h"
+#include "paddle/fluid/jit/engine/pe_engine.h"
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/details/build_strategy.h"
+#include "paddle/fluid/framework/details/execution_strategy.h"
+#include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/parallel_executor.h"
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/phi/core/enforce.h"
@@ -54,9 +57,9 @@ static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
   return execution_strategy;
 }
-PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
-                       const Name2VariableMap &params_dict,
-                       const phi::Place &place)
+PEEngine::PEEngine(const std::shared_ptr<FunctionInfo> &info,
+                   const Name2VariableMap &params_dict,
+                   const phi::Place &place)
     : info_(info), place_(place) {
   info_->RemoveDescFeedFetch();
   PADDLE_ENFORCE_GT(
@@ -69,7 +72,7 @@ PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
   CreateGraphAndPE();
 }
-void PEFunction::CreateGraphAndPE() {
+void PEEngine::CreateGraphAndPE() {
   framework::details::BuildStrategy build_strategy;
   auto execution_strategy = GetExecutionStrategy(place_);
@@ -85,12 +88,12 @@ void PEFunction::CreateGraphAndPE() {
   inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
 }
-std::vector<Tensor> PEFunction::operator()(const std::vector<Tensor> &inputs) {
+std::vector<Tensor> PEEngine::operator()(const std::vector<Tensor> &inputs) {
   auto dense_tensors = utils::ToDenseTensors(inputs);
   return utils::ToTensors(this->operator()(dense_tensors));
 }
-std::vector<DenseTensor> PEFunction::operator()(
+std::vector<DenseTensor> PEEngine::operator()(
     const std::vector<DenseTensor> &inputs) {
   utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
@@ -109,7 +112,7 @@ std::vector<DenseTensor> PEFunction::operator()(
   return outputs;
 }
-const std::shared_ptr<FunctionInfo> &PEFunction::Info() const { return info_; }
+const std::shared_ptr<FunctionInfo> &PEEngine::Info() const { return info_; }
 }  // namespace jit
 }  // namespace paddle
@@ -16,29 +16,36 @@
 #include <vector>
-#include "paddle/fluid/framework/details/execution_strategy.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/parallel_executor.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/function_utils.h"
 namespace paddle {
+namespace framework {
+class ParallelExecutor;
+namespace details {
+class ExecutionStrategy;
+}
+namespace ir {
+class Graph;
+}
+}  // namespace framework
 namespace jit {
 using ExecutionStrategy = framework::details::ExecutionStrategy;
 using ParallelExecutor = framework::ParallelExecutor;
 using Graph = framework::ir::Graph;
-class PEFunction : public BaseFunction {
+class PEEngine : public BaseEngine {
  public:
-  PEFunction(const std::shared_ptr<FunctionInfo> &info,
-             const Name2VariableMap &params_dict,
-             const phi::Place &place);
+  PEEngine(const std::shared_ptr<FunctionInfo> &info,
+           const Name2VariableMap &params_dict,
+           const phi::Place &place);
-  ~PEFunction() noexcept {}
+  ~PEEngine() noexcept {}
   void CreateGraphAndPE();
......
@@ -12,55 +12,32 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#pragma once
+#include "paddle/fluid/jit/function.h"
-#include <memory>
+#include <string>
 #include <vector>
-#include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/core/dense_tensor.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
+#include "paddle/fluid/jit/function_utils.h"
 namespace paddle {
 namespace jit {
-class ClassType;
-namespace internal {
-class Object {
- public:
-  Object(const std::shared_ptr<ClassType>& type, size_t num_slot)
-      : type_(type) {
-    slots_.resize(num_slot);
-  }
-  static std::unique_ptr<Object> Create(std::shared_ptr<ClassType> type,
-                                        size_t num_slot) {
-    return std::make_unique<Object>(type, num_slot);
-  }
-  std::shared_ptr<ClassType> Type() const { return type_; }
-  void SetSlot(size_t slot, Variable val) {
-    if (slot >= slots_.size()) {
-      slots_.resize(slot);
-    }
-    slots_[slot] = std::move(val);
-  }
-  const Variable& GetSlot(size_t slot) {
-    // TODO(dev): Add ENFORCE_LT(slot, size());
-    return slots_[slot];
-  }
-  Variable GetAttr(const std::string& name) const;
-  void SetAttr(const std::string& name, Variable val);
- private:
-  std::shared_ptr<ClassType> type_;
-  // Store Tensors and Attributes
-  std::vector<Variable> slots_;
-};
-}  // namespace internal
+Function::Function(BaseEngine* engine) : engine_(engine) {}
+std::vector<Tensor> Function::operator()(
+    const std::vector<Tensor>& inputs) const {
+  auto dense_tensors = utils::ToDenseTensors(inputs);
+  return utils::ToTensors(this->operator()(dense_tensors));
+}
+std::vector<DenseTensor> Function::operator()(
+    const std::vector<DenseTensor>& inputs) const {
+  return (*engine_)(inputs);
+}
 }  // namespace jit
 }  // namespace paddle
@@ -14,45 +14,30 @@
 #pragma once
-#include <memory>
 #include <string>
 #include <vector>
-#include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
 namespace paddle {
 namespace jit {
-using Variable = paddle::framework::Variable;
-class BaseFunction;
-class CompilationUnit;
+class BaseEngine;
+using DenseTensor = phi::DenseTensor;
+using Tensor = paddle::experimental::Tensor;
-class ClassType {
+class Function {
  public:
-  ClassType(const std::vector<std::string>& names,
-            std::weak_ptr<CompilationUnit> cu)
-      : const_names_(names), compilation_unit_(cu) {}
-  static std::shared_ptr<ClassType> Create(
-      const std::vector<std::string>& names,
-      std::weak_ptr<CompilationUnit> cu) {
-    return std::make_shared<ClassType>(names, cu);
-  }
-  // const std::vector<Function*> Methods() const;
-  // const Variable& GetAttribute(size_t slot) const;
-  // const Variable& GetAttribute(const std::string& name) const;
-  // size_t AddAttribute(const std::string& name, Variable val);
+  explicit Function(BaseEngine* engine);
+  std::vector<Tensor> operator()(const std::vector<Tensor>& inputs) const;
+  std::vector<DenseTensor> operator()(
+      const std::vector<DenseTensor>& inputs) const;
+  ~Function() = default;
  private:
-  // TODO(dev): distinguish parameter and buffer
-  std::vector<std::string> const_names_;
-  std::vector<Variable> const_value_;
-  std::vector<BaseFunction*> methods_;
-  std::vector<BaseFunction*> static_method_;
-  std::weak_ptr<CompilationUnit> compilation_unit_;
+  BaseEngine* engine_;
 };
 }  // namespace jit
......
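Note that Function borrows a raw BaseEngine* rather than sharing ownership; the engine's lifetime is still managed by the CompilationUnit that created it. A hedged usage sketch (`unit` and `inputs` are assumed to exist, as in the code above):

    // Sketch only: Function is a thin, copyable handle over an engine.
    std::shared_ptr<paddle::jit::BaseEngine> engine = unit.GetEngine("forward");
    paddle::jit::Function func(engine.get());  // non-owning wrapper
    auto outs = func(inputs);                  // dispatches to (*engine)(inputs)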
@@ -58,9 +58,9 @@ void ShareParamsIntoScope(const std::vector<std::string> &param_names,
 void RemoveFeedFetch(framework::ProgramDesc *program_desc);
 template <typename T>
-std::shared_ptr<T> MakeFunction(const std::shared_ptr<FunctionInfo> &info,
-                                const Name2VariableMap &params_dict,
-                                const phi::Place &place) {
+std::shared_ptr<T> MakeEngine(const std::shared_ptr<FunctionInfo> &info,
+                              const Name2VariableMap &params_dict,
+                              const phi::Place &place) {
   return std::make_shared<T>(info, params_dict, place);
 }
......
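MakeEngine is the renamed MakeFunction helper: a one-line factory that works for any engine whose constructor takes (info, params_dict, place). Illustrative use, mirroring what the deserializer below does (`info`, `params_dict`, `place`, and `layer` are assumed):

    // Sketch only: constructing an engine and registering it on a layer.
    auto engine = paddle::jit::utils::MakeEngine<paddle::jit::ExecutorEngine>(
        info, params_dict, place);
    layer.SetEngine(info->FunctionName(), engine);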
@@ -15,62 +15,68 @@
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/phi/core/enforce.h"
-#include "paddle/phi/core/errors.h"
 #include "paddle/fluid/jit/compilation_unit.h"
-#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
+#include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/jit/function_schema.h"
+#include "paddle/phi/core/enforce.h"
+#include "paddle/phi/core/errors.h"
 namespace paddle {
 namespace jit {
-Layer::Layer(const Name2VariableMap& params_dict,
-             const Name2VariableMap& attrs_dict,
+Layer::Layer(const Name2VariableMap& params_map,
+             const Name2VariableMap& attrs_map,
+             const Name2FunctionInfoMap& info_map,
              const phi::Place& place)
-    : params_dict_(params_dict), attrs_dict_(attrs_dict) {
+    : params_map_(params_map), attrs_map_(attrs_map), info_map_(info_map) {
   unit_.reset(new CompilationUnit());
 }
-std::shared_ptr<BaseFunction> Layer::Function(const std::string& name) const {
-  return unit_->Function(name);
+jit::Function Layer::Function(const std::string& name) const {
+  return jit::Function(unit_->GetEngine(name).get());
 }
 std::vector<Tensor> Layer::forward(const std::vector<Tensor>& inputs) {
-  auto func = Function("forward");
-  return (*func)(inputs);
+  auto func = this->Function("forward");
+  return func(inputs);
 }
 std::vector<DenseTensor> Layer::forward(
     const std::vector<DenseTensor>& inputs) {
-  auto func = Function("forward");
-  return (*func)(inputs);
+  auto func = this->Function("forward");
+  return func(inputs);
 }
 void Layer::to(const phi::Place& place) {}
-void Layer::SetFunction(const std::string& name,
-                        const std::shared_ptr<BaseFunction>& function) {
-  unit_->SetFunction(name, function);
+void Layer::SetEngine(const std::string& name,
+                      const std::shared_ptr<BaseEngine>& engine) {
+  unit_->SetEngine(name, engine);
 }
-std::vector<std::string> Layer::FunctionNames() const {
-  return unit_->FunctionNames();
-}
-const Name2FunctionMap& Layer::FunctionMap() const {
-  return unit_->FunctionMap();
+const Name2EngineMap& Layer::EngineMap() const { return unit_->EngineMap(); }
+const std::shared_ptr<jit::FunctionInfo>& Layer::FunctionInfo(
+    const std::string& name) const {
+  PADDLE_ENFORCE_EQ(
+      info_map_.count(name),
+      1,
+      phi::errors::InvalidArgument(
+          "FunctionInfo named %s does not exist in info_map_.", name));
+  return info_map_.at(name);
 }
 #define PD_SPECIALZE_ATTRIBUTE_TYPE(T)                                     \
   template <>                                                              \
   T Layer::Attribute<T>(const std::string& name) const {                   \
-    if (attrs_dict_.find(name) == attrs_dict_.end()) {                     \
+    if (attrs_map_.find(name) == attrs_map_.end()) {                       \
       PADDLE_THROW(phi::errors::NotFound(                                  \
           "Attribute can not found %s, please check if it exists."));      \
       return T();                                                          \
     }                                                                      \
-    auto var = attrs_dict_.at(name);                                       \
+    auto var = attrs_map_.at(name);                                        \
     T ret = var->Get<T>();                                                 \
    return ret;                                                             \
  }
......
@@ -14,6 +14,7 @@
 #pragma once
+#include <memory>
 #include <string>
 #include <unordered_map>
 #include <vector>
@@ -21,7 +22,7 @@
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/place.h"
-#include "function/base_function.h"  // NOLINT
+#include "function.h"  // NOLINT
 namespace paddle {
@@ -31,22 +32,26 @@ class Variable;
 namespace jit {
 class CompilationUnit;
+class FunctionInfo;
 using DenseTensor = phi::DenseTensor;
 using Tensor = paddle::experimental::Tensor;
 using Variable = paddle::framework::Variable;
 using Name2VariableMap =
     std::unordered_map<std::string, std::shared_ptr<Variable>>;
-using Name2FunctionMap =
-    std::unordered_map<std::string, std::shared_ptr<BaseFunction>>;
+using Name2EngineMap =
+    std::unordered_map<std::string, std::shared_ptr<BaseEngine>>;
+using Name2FunctionInfoMap =
+    std::unordered_map<std::string, std::shared_ptr<FunctionInfo>>;
 class Layer {
  public:
-  Layer(const Name2VariableMap& params_dict,
-        const Name2VariableMap& attrs_dict_,
+  Layer(const Name2VariableMap& params_map,
+        const Name2VariableMap& attrs_map_,
+        const Name2FunctionInfoMap& info_map,
         const phi::Place& place);
-  std::shared_ptr<BaseFunction> Function(const std::string& name) const;
+  jit::Function Function(const std::string& name) const;
   template <typename T>
   T Attribute(const std::string& name) const;
@@ -57,16 +62,18 @@ class Layer {
   void to(const phi::Place& place);
-  void SetFunction(const std::string& name,
-                   const std::shared_ptr<BaseFunction>& function);
+  void SetEngine(const std::string& name,
+                 const std::shared_ptr<BaseEngine>& engine);
-  std::vector<std::string> FunctionNames() const;
-  const Name2FunctionMap& FunctionMap() const;
+  const Name2EngineMap& EngineMap() const;
+  const std::shared_ptr<jit::FunctionInfo>& FunctionInfo(
+      const std::string& name) const;
  private:
-  Name2VariableMap params_dict_;
-  Name2VariableMap attrs_dict_;
+  Name2VariableMap params_map_;
+  Name2VariableMap attrs_map_;
+  Name2FunctionInfoMap info_map_;
   std::shared_ptr<CompilationUnit> unit_;
 };
......
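A short sketch of the two accessors Layer now exposes instead of FunctionNames/FunctionMap (assuming a constructed `layer`):

    // Sketch only: enumerating engines and inspecting per-function metadata.
    for (const auto& kv : layer.EngineMap()) {
      const auto& info = layer.FunctionInfo(kv.first);
      VLOG(3) << kv.first << " takes " << info->InputArgNames().size()
              << " inputs";
    }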
@@ -26,6 +26,7 @@
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
+#include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/jit/function_utils.h"
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/serializer.h"
@@ -102,7 +103,7 @@ TEST(CpuLayerTest, Construct) {
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);
   auto func = layer.Function("infer");
-  outs = (*func)(inputs);
+  outs = func(inputs);
   out_data = outs[0].data<float>();
   EXPECT_NEAR(out_data[0], 1.41562390, 1e-6);
   auto pow_out =
@@ -127,7 +128,7 @@ TEST(GpuLayerTest, Construct) {
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);
   auto func = layer.Function("infer");
-  outs = (*func)(inputs);
+  outs = func(inputs);
   gpu_tensor = outs[0];
   cpu_tensor = paddle::experimental::copy_to(gpu_tensor, phi::CPUPlace(), true);
   out_data = cpu_tensor.data<float>();
......
@@ -20,8 +20,8 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/jit/function/executor_function.h"
-#include "paddle/fluid/jit/function/pe_function.h"
+#include "paddle/fluid/jit/engine/executor_engine.h"
+#include "paddle/fluid/jit/engine/pe_engine.h"
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/property.h"
 #include "paddle/fluid/jit/serializer_utils.h"
@@ -30,18 +30,18 @@ DECLARE_string(jit_engine_type);
 namespace paddle {
 namespace jit {
+using Name2FunctionInfoMap =
+    std::unordered_map<std::string, std::shared_ptr<FunctionInfo>>;
 Layer Deserializer::operator()(const std::string& path,
                                const phi::Place& place) {
   const auto& pdmodel_paths = utils::PdmodelFilePaths(path);
   // set is ordered
   std::set<std::string> param_names_set;
-  std::vector<std::shared_ptr<FunctionInfo>> infos;
+  Name2FunctionInfoMap info_map;
   for (auto& it : pdmodel_paths) {
     auto& func_name = it.first;
     auto program_desc = LoadProgram(it.second);
-    // TODO(dev): load int/float attrs
     std::vector<std::string> persist_var_names;
     auto all_var_desc = program_desc.Block(0).AllVars();
     for (auto* desc_ptr : all_var_desc) {
@@ -51,8 +51,8 @@ Layer Deserializer::operator()(const std::string& path,
     }
     param_names_set.insert(persist_var_names.begin(), persist_var_names.end());
-    infos.emplace_back(std::make_shared<FunctionInfo>(
-        func_name, persist_var_names, program_desc));
+    info_map[func_name] = std::make_shared<FunctionInfo>(
+        func_name, persist_var_names, program_desc);
   }
   Name2VariableMap params_dict;
@@ -64,23 +64,23 @@ Layer Deserializer::operator()(const std::string& path,
     VLOG(3) << "Read Property Success!";
   }
-  Layer layer = Layer(params_dict, attrs_dict, place);
+  Layer layer = Layer(params_dict, attrs_dict, info_map, place);
-  for (auto& info : infos) {
+  for (auto it = info_map.begin(); it != info_map.end(); ++it) {
+    const std::string& func_name = it->first;
+    auto& info = it->second;
     if (FLAGS_jit_engine_type == "Executor") {
-      VLOG(3) << "Add function type: ExecutorFunction. name: "
-              << info->FunctionName();
-      layer.SetFunction(
-          info->FunctionName(),
-          utils::MakeFunction<ExecutorFunction>(info, params_dict, place));
+      VLOG(3) << "Add function type: ExecutorEngine. Function name: "
+              << func_name;
+      layer.SetEngine(
+          func_name,
+          utils::MakeEngine<ExecutorEngine>(info, params_dict, place));
     } else if (FLAGS_jit_engine_type == "PE") {
-      VLOG(3) << "Add function type: PEFunction. name: "
-              << info->FunctionName();
-      layer.SetFunction(
-          info->FunctionName(),
-          utils::MakeFunction<PEFunction>(info, params_dict, place));
+      VLOG(3) << "Add function type: PEEngine. Function name: " << func_name;
+      layer.SetEngine(func_name,
+                      utils::MakeEngine<PEEngine>(info, params_dict, place));
     } else {
-      PD_THROW("Invalid JitLayer funciton type.");
+      PD_THROW("Invalid JitLayer engine type.");
     }
   }
......
@@ -1006,8 +1006,8 @@ PADDLE_DEFINE_EXPORTED_bool(
 * default=PE
 * Example:
 * Note:
- * FLAGS_jit_engine_type == Executor, using ExecutorFunction by default
- * FLAGS_jit_engine_type == PE, using PEFunction by default
+ * FLAGS_jit_engine_type == Executor, using ExecutorEngine by default
+ * FLAGS_jit_engine_type == PE, using PEEngine by default
 */
 PADDLE_DEFINE_EXPORTED_string(jit_engine_type,
                               "PE",
......
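Since the flag is declared with DECLARE_string(jit_engine_type) in serializer.cc, a host binary can pick the engine before loading a layer. A hedged sketch:

    // Sketch only: selecting ExecutorEngine instead of the default PEEngine.
    #include "gflags/gflags.h"
    DECLARE_string(jit_engine_type);

    void UseExecutorEngine() {
      FLAGS_jit_engine_type = "Executor";  // read by Deserializer::operator()
    }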
@@ -372,8 +372,8 @@ static PyObject* eager_api_jit_function_call(PyObject* self,
                                              PyObject* args,
                                              PyObject* kwargs) {
   EAGER_TRY
-  std::shared_ptr<jit::BaseFunction> function =
-      CastPyArg2BaseFunction(PyTuple_GET_ITEM(args, 0), 0);
+  std::shared_ptr<jit::BaseEngine> function =
+      CastPyArg2BaseEngine(PyTuple_GET_ITEM(args, 0), 0);
   std::vector<paddle::experimental::Tensor> ins =
       CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
   std::vector<paddle::experimental::Tensor> outs = (*function)(ins);
......
@@ -22,8 +22,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/scope_guard.h"
-#include "paddle/fluid/jit/function/executor_function.h"
-#include "paddle/fluid/jit/function/pe_function.h"
+#include "paddle/fluid/jit/engine/executor_engine.h"
+#include "paddle/fluid/jit/engine/pe_engine.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/operators/py_func_op.h"
 #include "paddle/fluid/operators/utils.h"
@@ -54,8 +54,8 @@ extern PyTypeObject* g_customplace_pytype;
 extern PyTypeObject* g_framework_tensor_pytype;
 extern PyTypeObject* g_framework_lodtensorarray_pytype;
 extern PyTypeObject* g_custom_op_kernel_ctx_pytype;
-extern PyTypeObject* g_executor_function_pytype;
-extern PyTypeObject* g_pe_function_pytype;
+extern PyTypeObject* g_executor_engine_pytype;
+extern PyTypeObject* g_pe_engine_pytype;
 int TensorDtype2NumpyDtype(phi::DataType dtype) {
   switch (dtype) {
@@ -232,19 +232,18 @@ std::shared_ptr<imperative::VarBase> CastPyArg2VarBase(PyObject* obj,
   return py::cast<std::shared_ptr<imperative::VarBase>>(obj);
 }
-std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
-                                                          ssize_t arg_pos) {
+std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
+                                                      ssize_t arg_pos) {
   if (PyObject_IsInstance(
-          obj, reinterpret_cast<PyObject*>(g_executor_function_pytype))) {
-    return ::pybind11::handle(obj)
-        .cast<std::shared_ptr<jit::ExecutorFunction>>();
+          obj, reinterpret_cast<PyObject*>(g_executor_engine_pytype))) {
+    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::ExecutorEngine>>();
   } else if (PyObject_IsInstance(
-                 obj, reinterpret_cast<PyObject*>(g_pe_function_pytype))) {
-    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::PEFunction>>();
+                 obj, reinterpret_cast<PyObject*>(g_pe_engine_pytype))) {
    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::PEEngine>>();
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "argument (position %d) must be "
-        "BaseFunction, but got %s",
+        "BaseEngine, but got %s",
         arg_pos + 1,
         reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
   }
......
@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/engine/base_engine.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
@@ -75,8 +75,8 @@ framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
 std::unordered_map<std::wstring, int> CastPyArg2Vocab(PyObject* obj,
                                                       ssize_t arg_pos);
 std::vector<std::string> CastPyArg2Strings(PyObject* obj, ssize_t arg_pos);
-std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
-                                                          ssize_t arg_pos);
+std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
+                                                      ssize_t arg_pos);
 PyObject* ToPyObject(int value);
 PyObject* ToPyObject(uint32_t value);
......
@@ -18,8 +18,8 @@ limitations under the License. */
 #include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/fluid/jit/function/executor_function.h"
-#include "paddle/fluid/jit/function/pe_function.h"
+#include "paddle/fluid/jit/engine/executor_engine.h"
+#include "paddle/fluid/jit/engine/pe_engine.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/serializer.h"
@@ -29,27 +29,26 @@ namespace py = pybind11;
 namespace paddle {
 namespace pybind {
-PyTypeObject *g_executor_function_pytype = nullptr;
-PyTypeObject *g_pe_function_pytype = nullptr;
+PyTypeObject *g_executor_engine_pytype = nullptr;
+PyTypeObject *g_pe_engine_pytype = nullptr;
 using Variable = paddle::framework::Variable;
 void BindJit(pybind11::module *m) {
   py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
       .def("function_dict",
-           &jit::Layer::FunctionMap,
+           &jit::Layer::EngineMap,
           py::return_value_policy::reference);
-  py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>
-      executor_function(
-          *m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC");
-  g_executor_function_pytype =
-      reinterpret_cast<PyTypeObject *>(executor_function.ptr());
-  executor_function.def("info", &jit::ExecutorFunction::Info);
+  py::class_<jit::ExecutorEngine, std::shared_ptr<jit::ExecutorEngine>>
+      executor_engine(*m, "ExecutorEngine", R"DOC(ExecutorEngine Class.)DOC");
+  g_executor_engine_pytype =
+      reinterpret_cast<PyTypeObject *>(executor_engine.ptr());
+  executor_engine.def("info", &jit::ExecutorEngine::Info);
-  py::class_<jit::PEFunction, std::shared_ptr<jit::PEFunction>> pe_function(
-      *m, "PEFunction", R"DOC(PEFunction Class.)DOC");
-  g_pe_function_pytype = reinterpret_cast<PyTypeObject *>(pe_function.ptr());
-  pe_function.def("info", &jit::PEFunction::Info);
+  py::class_<jit::PEEngine, std::shared_ptr<jit::PEEngine>> pe_engine(
+      *m, "PEEngine", R"DOC(PEEngine Class.)DOC");
+  g_pe_engine_pytype = reinterpret_cast<PyTypeObject *>(pe_engine.ptr());
+  pe_engine.def("info", &jit::PEEngine::Info);
   py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
       *m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
......
@@ -628,7 +628,7 @@ headers = (
     # utils api headers
     list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/utils', recursive=True)))  # paddle utils headers
-jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'base_function.h']
+jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'function.h']
 for f in jit_layer_headers:
     headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=True))
......