Unverified commit ede0990f, authored by WangZhen, committed by GitHub

[JitLayer] Rename Function to Engine and use new Function class to wrap Engine (#44900)

* Polish function code

* Rename function to engine

* Fix log messages and docs

* Rename Function to Engine and use new Function class to wrap Engine

* Rename EngineInfo

* Adjust member variable order
Parent 80cc4f0d
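In short: the old BaseFunction hierarchy becomes a BaseEngine hierarchy (ExecutorEngine, PEEngine), and a new lightweight jit::Function class wraps a raw BaseEngine pointer so callers can invoke func(inputs) directly. A minimal sketch of that relationship, simplified from the declarations in this diff (DenseTensor here is a stand-in for phi::DenseTensor, not the real type):

#include <vector>

struct DenseTensor {};  // stand-in for phi::DenseTensor in this sketch

// Engines own the execution logic (Executor- or ParallelExecutor-backed).
class BaseEngine {
 public:
  virtual std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) = 0;
  virtual ~BaseEngine() {}
};

// Function is now a thin, copyable handle that forwards each call to an
// engine it does not own; the CompilationUnit keeps the owning shared_ptr.
class Function {
 public:
  explicit Function(BaseEngine* engine) : engine_(engine) {}
  std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) const {
    return (*engine_)(inputs);
  }

 private:
  BaseEngine* engine_;
};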
add_subdirectory(function)
add_subdirectory(engine)
proto_library(paddle_jit_property_proto SRCS property.proto)
cc_library(
......@@ -31,6 +31,11 @@ cc_library(
SRCS function_schema.cc
DEPS jit_function_utils)
cc_library(
jit_function
SRCS function.cc
DEPS jit_function_utils jit_executor_engine jit_pe_engine)
cc_library(
jit_layer
SRCS layer.cc
......@@ -39,8 +44,9 @@ cc_library(
jit_serializer_utils
jit_compilation_unit
jit_function_schema
jit_executor_function
jit_pe_function)
jit_executor_engine
jit_pe_engine
jit_function)
if(WITH_TESTING AND NOT WIN32)
add_custom_target(
......
......@@ -14,7 +14,7 @@
#pragma once
#include "function/base_function.h" // NOLINT
#include "function.h" //NOLINT
#include "layer.h" // NOLINT
#include "serializer.h" // NOLINT
#include "serializer_utils.h" // NOLINT
......@@ -16,37 +16,27 @@
#include "paddle/phi/core/enforce.h"
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/engine/base_engine.h"
namespace paddle {
namespace jit {
std::shared_ptr<BaseFunction> CompilationUnit::Function(
std::shared_ptr<BaseEngine> CompilationUnit::GetEngine(
const std::string &name) const {
PADDLE_ENFORCE_EQ(
function_map_.count(name),
engine_map_.count(name),
1,
phi::errors::InvalidArgument(
"Funciton name %s is not exist in function_map_.", name));
return function_map_.at(name);
"Funciton named %s is not exist in engine_map_.", name));
return engine_map_.at(name);
}
void CompilationUnit::SetFunction(
const std::string &name, const std::shared_ptr<BaseFunction> &function) {
function_map_[name] = function;
void CompilationUnit::SetEngine(const std::string &name,
const std::shared_ptr<BaseEngine> &engine) {
engine_map_[name] = engine;
}
std::vector<std::string> CompilationUnit::FunctionNames() const {
std::vector<std::string> names;
for (auto it = function_map_.begin(); it != function_map_.end(); it++) {
names.emplace_back(it->first);
}
return names;
}
const Name2FunctionMap &CompilationUnit::FunctionMap() const {
return function_map_;
}
const Name2EngineMap &CompilationUnit::EngineMap() const { return engine_map_; }
} // namespace jit
} // namespace paddle
......@@ -21,26 +21,24 @@
namespace paddle {
namespace jit {
class BaseFunction;
using Name2FunctionMap =
std::unordered_map<std::string, std::shared_ptr<BaseFunction>>;
class BaseEngine;
using Name2EngineMap =
std::unordered_map<std::string, std::shared_ptr<BaseEngine>>;
class CompilationUnit {
public:
CompilationUnit() = default;
~CompilationUnit() {}
std::shared_ptr<BaseFunction> Function(const std::string &name) const;
std::shared_ptr<BaseEngine> GetEngine(const std::string &name) const;
void SetFunction(const std::string &name,
const std::shared_ptr<BaseFunction> &function);
void SetEngine(const std::string &name,
const std::shared_ptr<BaseEngine> &engine);
std::vector<std::string> FunctionNames() const;
const Name2FunctionMap &FunctionMap() const;
const Name2EngineMap &EngineMap() const;
private:
Name2FunctionMap function_map_;
Name2EngineMap engine_map_;
};
} // namespace jit
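
Given these declarations, registering and looking up an engine is a two-call affair. A hedged usage sketch (the RegisterAndLookup helper is illustrative, not part of this commit):

#include <memory>
#include "paddle/fluid/jit/compilation_unit.h"
#include "paddle/fluid/jit/engine/base_engine.h"

namespace paddle {
namespace jit {
// Illustrative helper: store an engine under a name, then fetch it back.
void RegisterAndLookup(CompilationUnit* unit,
                       const std::shared_ptr<BaseEngine>& engine) {
  unit->SetEngine("forward", engine);
  // GetEngine enforces that the name is present in engine_map_ and raises
  // InvalidArgument otherwise (see the PADDLE_ENFORCE_EQ above).
  std::shared_ptr<BaseEngine> found = unit->GetEngine("forward");
  (void)found;
}
}  // namespace jit
}  // namespace paddle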
......
cc_library(
jit_executor_function
SRCS executor_function.cc
jit_executor_engine
SRCS executor_engine.cc
DEPS executor)
cc_library(
jit_pe_function
SRCS pe_function.cc
jit_pe_engine
SRCS pe_engine.cc
DEPS parallel_executor)
......@@ -22,14 +22,14 @@ namespace jit {
using Tensor = paddle::experimental::Tensor;
using DenseTensor = phi::DenseTensor;
class BaseFunction {
class BaseEngine {
public:
virtual std::vector<DenseTensor> operator()(
const std::vector<DenseTensor> &inputs) = 0;
virtual std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) = 0;
virtual ~BaseFunction() {}
virtual ~BaseEngine() {}
};
} // namespace jit
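
The contract for a new engine is just these two call operators. A minimal conforming engine, purely for illustration (EchoEngine is hypothetical; the real implementations in this diff are ExecutorEngine and PEEngine):

#include <vector>
#include "paddle/fluid/jit/engine/base_engine.h"

namespace paddle {
namespace jit {
// Hypothetical engine that returns its inputs unchanged.
class EchoEngine : public BaseEngine {
 public:
  std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) override {
    return inputs;
  }
  std::vector<Tensor> operator()(const std::vector<Tensor>& inputs) override {
    return inputs;
  }
};
}  // namespace jit
}  // namespace paddle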
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/variable.h"
......@@ -21,7 +21,7 @@
namespace paddle {
namespace jit {
ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
ExecutorEngine::ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
const Name2VariableMap &params_dict,
const phi::Place &place)
: info_(info), place_(place), inner_exe_(place_) {
......@@ -35,13 +35,13 @@ ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
}
std::vector<Tensor> ExecutorFunction::operator()(
std::vector<Tensor> ExecutorEngine::operator()(
const std::vector<Tensor> &inputs) {
auto dense_tensors = utils::ToDenseTensors(inputs);
return utils::ToTensors(this->operator()(dense_tensors));
}
std::vector<DenseTensor> ExecutorFunction::operator()(
std::vector<DenseTensor> ExecutorEngine::operator()(
const std::vector<DenseTensor> &inputs) {
utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
inner_exe_.Run(info_->ProgramDesc(),
......@@ -55,7 +55,7 @@ std::vector<DenseTensor> ExecutorFunction::operator()(
return outputs;
}
const std::shared_ptr<FunctionInfo> &ExecutorFunction::Info() const {
const std::shared_ptr<FunctionInfo> &ExecutorEngine::Info() const {
return info_;
}
......
......@@ -19,20 +19,20 @@
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace jit {
class ExecutorFunction : public BaseFunction {
class ExecutorEngine : public BaseEngine {
public:
ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
const Name2VariableMap &params_dict,
const phi::Place &place);
~ExecutorFunction() noexcept {}
~ExecutorEngine() noexcept {}
std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
......
......@@ -12,10 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"
......@@ -54,7 +57,7 @@ static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
return execution_strategy;
}
PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
PEEngine::PEEngine(const std::shared_ptr<FunctionInfo> &info,
const Name2VariableMap &params_dict,
const phi::Place &place)
: info_(info), place_(place) {
......@@ -69,7 +72,7 @@ PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
CreateGraphAndPE();
}
void PEFunction::CreateGraphAndPE() {
void PEEngine::CreateGraphAndPE() {
framework::details::BuildStrategy build_strategy;
auto execution_strategy = GetExecutionStrategy(place_);
......@@ -85,12 +88,12 @@ void PEFunction::CreateGraphAndPE() {
inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
}
std::vector<Tensor> PEFunction::operator()(const std::vector<Tensor> &inputs) {
std::vector<Tensor> PEEngine::operator()(const std::vector<Tensor> &inputs) {
auto dense_tensors = utils::ToDenseTensors(inputs);
return utils::ToTensors(this->operator()(dense_tensors));
}
std::vector<DenseTensor> PEFunction::operator()(
std::vector<DenseTensor> PEEngine::operator()(
const std::vector<DenseTensor> &inputs) {
utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
......@@ -109,7 +112,7 @@ std::vector<DenseTensor> PEFunction::operator()(
return outputs;
}
const std::shared_ptr<FunctionInfo> &PEFunction::Info() const { return info_; }
const std::shared_ptr<FunctionInfo> &PEEngine::Info() const { return info_; }
} // namespace jit
} // namespace paddle
......@@ -16,29 +16,36 @@
#include <vector>
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace jit {
namespace framework {
class ParallelExecutor;
namespace details {
class ExecutionStrategy;
}
namespace ir {
class Graph;
}
} // namespace framework
namespace jit {
using ExecutionStrategy = framework::details::ExecutionStrategy;
using ParallelExecutor = framework::ParallelExecutor;
using Graph = framework::ir::Graph;
class PEFunction : public BaseFunction {
class PEEngine : public BaseEngine {
public:
PEFunction(const std::shared_ptr<FunctionInfo> &info,
PEEngine(const std::shared_ptr<FunctionInfo> &info,
const Name2VariableMap &params_dict,
const phi::Place &place);
~PEFunction() noexcept {}
~PEEngine() noexcept {}
void CreateGraphAndPE();
......
......@@ -12,55 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/jit/function.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace jit {
class ClassType;
namespace internal {
class Object {
public:
Object(const std::shared_ptr<ClassType>& type, size_t num_slot)
: type_(type) {
slots_.resize(num_slot);
}
static std::unique_ptr<Object> Create(std::shared_ptr<ClassType> type,
size_t num_slot) {
return std::make_unique<Object>(type, num_slot);
}
std::shared_ptr<ClassType> Type() const { return type_; }
void SetSlot(size_t slot, Variable val) {
if (slot >= slots_.size()) {
slots_.resize(slot);
}
slots_[slot] = std::move(val);
}
const Variable& GetSlot(size_t slot) {
// TODO(dev): Add ENFORCE_LT(slot, size());
return slots_[slot];
}
Variable GetAttr(const std::string& name) const;
Function::Function(BaseEngine* engine) : engine_(engine) {}
void SetAttr(const std::string& name, Variable val);
std::vector<Tensor> Function::operator()(
const std::vector<Tensor>& inputs) const {
auto dense_tensors = utils::ToDenseTensors(inputs);
return utils::ToTensors(this->operator()(dense_tensors));
}
private:
std::shared_ptr<ClassType> type_;
// Store Tensors and Attributes
std::vector<Variable> slots_;
};
std::vector<DenseTensor> Function::operator()(
const std::vector<DenseTensor>& inputs) const {
return (*engine_)(inputs);
}
} // namespace internal
} // namespace jit
} // namespace paddle
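
Because Function stores only a raw pointer, the owning shared_ptr (held by the CompilationUnit in this commit) must outlive the handle. A usage sketch under that assumption (RunThroughFunction is illustrative, not part of the diff):

#include <memory>
#include <vector>
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function.h"

namespace paddle {
namespace jit {
// 'owned' must outlive 'func'; Function does not take ownership.
std::vector<Tensor> RunThroughFunction(
    const std::shared_ptr<BaseEngine>& owned,
    const std::vector<Tensor>& inputs) {
  Function func(owned.get());
  return func(inputs);  // forwards to (*engine_)(inputs)
}
}  // namespace jit
}  // namespace paddle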
......@@ -14,45 +14,30 @@
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/api/include/tensor.h"
namespace paddle {
namespace jit {
using Variable = paddle::framework::Variable;
class BaseFunction;
class CompilationUnit;
class BaseEngine;
using DenseTensor = phi::DenseTensor;
using Tensor = paddle::experimental::Tensor;
class ClassType {
class Function {
public:
ClassType(const std::vector<std::string>& names,
std::weak_ptr<CompilationUnit> cu)
: const_names_(names), compilation_unit_(cu) {}
explicit Function(BaseEngine* engine);
static std::shared_ptr<ClassType> Create(
const std::vector<std::string>& names,
std::weak_ptr<CompilationUnit> cu) {
return std::make_shared<ClassType>(names, cu);
}
std::vector<Tensor> operator()(const std::vector<Tensor>& inputs) const;
// const std::vector<Function*> Methods() const;
std::vector<DenseTensor> operator()(
const std::vector<DenseTensor>& inputs) const;
// const Variable& GetAttribute(size_t slot) const;
// const Variable& GetAttribute(const std::string& name) const;
// size_t AddAttribute(const std::string& name, Variable val);
~Function() = default;
private:
// TODO(dev): disingwish parameter and buffer
std::vector<std::string> const_names_;
std::vector<Variable> const_value_;
std::vector<BaseFunction*> methods_;
std::vector<BaseFunction*> static_method_;
std::weak_ptr<CompilationUnit> compilation_unit_;
BaseEngine* engine_;
};
} // namespace jit
......
......@@ -58,7 +58,7 @@ void ShareParamsIntoScope(const std::vector<std::string> &param_names,
void RemoveFeedFetch(framework::ProgramDesc *program_desc);
template <typename T>
std::shared_ptr<T> MakeFunction(const std::shared_ptr<FunctionInfo> &info,
std::shared_ptr<T> MakeEngine(const std::shared_ptr<FunctionInfo> &info,
const Name2VariableMap &params_dict,
const phi::Place &place) {
return std::make_shared<T>(info, params_dict, place);
......
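
MakeEngine replaces MakeFunction as the factory helper. Wiring an engine into a Layer then looks roughly like the deserializer code further down; a sketch assembled from this diff (AddExecutorEngine itself is illustrative, not a verbatim excerpt):

#include <memory>
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
#include "paddle/fluid/jit/layer.h"

namespace paddle {
namespace jit {
// Build an ExecutorEngine for one FunctionInfo and register it by name.
void AddExecutorEngine(Layer* layer,
                       const std::shared_ptr<FunctionInfo>& info,
                       const Name2VariableMap& params_dict,
                       const phi::Place& place) {
  layer->SetEngine(info->FunctionName(),
                   utils::MakeEngine<ExecutorEngine>(info, params_dict, place));
}
}  // namespace jit
}  // namespace paddle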
......@@ -15,62 +15,68 @@
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/errors.h"
#include "paddle/fluid/jit/compilation_unit.h"
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/errors.h"
namespace paddle {
namespace jit {
Layer::Layer(const Name2VariableMap& params_dict,
const Name2VariableMap& attrs_dict,
Layer::Layer(const Name2VariableMap& params_map,
const Name2VariableMap& attrs_map,
const Name2FunctionInfoMap& info_map,
const phi::Place& place)
: params_dict_(params_dict), attrs_dict_(attrs_dict) {
: params_map_(params_map), attrs_map_(attrs_map), info_map_(info_map) {
unit_.reset(new CompilationUnit());
}
std::shared_ptr<BaseFunction> Layer::Function(const std::string& name) const {
return unit_->Function(name);
jit::Function Layer::Function(const std::string& name) const {
return jit::Function(unit_->GetEngine(name).get());
}
std::vector<Tensor> Layer::forward(const std::vector<Tensor>& inputs) {
auto func = Function("forward");
return (*func)(inputs);
auto func = this->Function("forward");
return func(inputs);
}
std::vector<DenseTensor> Layer::forward(
const std::vector<DenseTensor>& inputs) {
auto func = Function("forward");
return (*func)(inputs);
auto func = this->Function("forward");
return func(inputs);
}
void Layer::to(const phi::Place& place) {}
void Layer::SetFunction(const std::string& name,
const std::shared_ptr<BaseFunction>& function) {
unit_->SetFunction(name, function);
void Layer::SetEngine(const std::string& name,
const std::shared_ptr<BaseEngine>& engine) {
unit_->SetEngine(name, engine);
}
std::vector<std::string> Layer::FunctionNames() const {
return unit_->FunctionNames();
}
const Name2EngineMap& Layer::EngineMap() const { return unit_->EngineMap(); }
const Name2FunctionMap& Layer::FunctionMap() const {
return unit_->FunctionMap();
const std::shared_ptr<jit::FunctionInfo>& Layer::FunctionInfo(
const std::string& name) const {
PADDLE_ENFORCE_EQ(
info_map_.count(name),
1,
phi::errors::InvalidArgument(
"FuncitonInfo named %s is not exist in info_map_.", name));
return info_map_.at(name);
}
#define PD_SPECIALZE_ATTRIBUTE_TYPE(T) \
template <> \
T Layer::Attribute<T>(const std::string& name) const { \
if (attrs_dict_.find(name) == attrs_dict_.end()) { \
if (attrs_map_.find(name) == attrs_map_.end()) { \
PADDLE_THROW(phi::errors::NotFound( \
"Attribute can not found %s, please check if it exists.")); \
return T(); \
} \
auto var = attrs_dict_.at(name); \
auto var = attrs_map_.at(name); \
T ret = var->Get<T>(); \
return ret; \
}
......
......@@ -14,6 +14,7 @@
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
......@@ -21,7 +22,7 @@
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/common/place.h"
#include "function/base_function.h" //NOLINT
#include "function.h" //NOLINT
namespace paddle {
......@@ -31,22 +32,26 @@ class Variable;
namespace jit {
class CompilationUnit;
class FunctionInfo;
using DenseTensor = phi::DenseTensor;
using Tensor = paddle::experimental::Tensor;
using Variable = paddle::framework::Variable;
using Name2VariableMap =
std::unordered_map<std::string, std::shared_ptr<Variable>>;
using Name2FunctionMap =
std::unordered_map<std::string, std::shared_ptr<BaseFunction>>;
using Name2EngineMap =
std::unordered_map<std::string, std::shared_ptr<BaseEngine>>;
using Name2FunctionInfoMap =
std::unordered_map<std::string, std::shared_ptr<FunctionInfo>>;
class Layer {
public:
Layer(const Name2VariableMap& params_dict,
const Name2VariableMap& attrs_dict_,
Layer(const Name2VariableMap& params_map,
const Name2VariableMap& attrs_map_,
const Name2FunctionInfoMap& info_map,
const phi::Place& place);
std::shared_ptr<BaseFunction> Function(const std::string& name) const;
jit::Function Function(const std::string& name) const;
template <typename T>
T Attribute(const std::string& name) const;
......@@ -57,16 +62,18 @@ class Layer {
void to(const phi::Place& place);
void SetFunction(const std::string& name,
const std::shared_ptr<BaseFunction>& function);
void SetEngine(const std::string& name,
const std::shared_ptr<BaseEngine>& engine);
std::vector<std::string> FunctionNames() const;
const Name2EngineMap& EngineMap() const;
const Name2FunctionMap& FunctionMap() const;
const std::shared_ptr<jit::FunctionInfo>& FunctionInfo(
const std::string& name) const;
private:
Name2VariableMap params_dict_;
Name2VariableMap attrs_dict_;
Name2VariableMap params_map_;
Name2VariableMap attrs_map_;
Name2FunctionInfoMap info_map_;
std::shared_ptr<CompilationUnit> unit_;
};
......
......@@ -26,6 +26,7 @@
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/jit/function.h"
#include "paddle/fluid/jit/function_utils.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/serializer.h"
......@@ -102,7 +103,7 @@ TEST(CpuLayerTest, Construct) {
EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);
auto func = layer.Function("infer");
outs = (*func)(inputs);
outs = func(inputs);
out_data = outs[0].data<float>();
EXPECT_NEAR(out_data[0], 1.41562390, 1e-6);
auto pow_out =
......@@ -127,7 +128,7 @@ TEST(GpuLayerTest, Construct) {
EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);
auto func = layer.Function("infer");
outs = (*func)(inputs);
outs = func(inputs);
gpu_tensor = outs[0];
cpu_tensor = paddle::experimental::copy_to(gpu_tensor, phi::CPUPlace(), true);
out_data = cpu_tensor.data<float>();
......
......@@ -20,8 +20,8 @@
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/property.h"
#include "paddle/fluid/jit/serializer_utils.h"
......@@ -30,18 +30,18 @@ DECLARE_string(jit_engine_type);
namespace paddle {
namespace jit {
using Name2FunctionInfoMap =
std::unordered_map<std::string, std::shared_ptr<FunctionInfo>>;
Layer Deserializer::operator()(const std::string& path,
const phi::Place& place) {
const auto& pdmodel_paths = utils::PdmodelFilePaths(path);
// set is ordered
std::set<std::string> param_names_set;
std::vector<std::shared_ptr<FunctionInfo>> infos;
Name2FunctionInfoMap info_map;
for (auto& it : pdmodel_paths) {
auto& func_name = it.first;
auto program_desc = LoadProgram(it.second);
// TODO(dev): load int/float attrs
std::vector<std::string> persist_var_names;
auto all_var_desc = program_desc.Block(0).AllVars();
for (auto* desc_ptr : all_var_desc) {
......@@ -51,8 +51,8 @@ Layer Deserializer::operator()(const std::string& path,
}
param_names_set.insert(persist_var_names.begin(), persist_var_names.end());
infos.emplace_back(std::make_shared<FunctionInfo>(
func_name, persist_var_names, program_desc));
info_map[func_name] = std::make_shared<FunctionInfo>(
func_name, persist_var_names, program_desc);
}
Name2VariableMap params_dict;
......@@ -64,23 +64,23 @@ Layer Deserializer::operator()(const std::string& path,
VLOG(3) << "Read Property Success!";
}
Layer layer = Layer(params_dict, attrs_dict, place);
Layer layer = Layer(params_dict, attrs_dict, info_map, place);
for (auto& info : infos) {
for (auto it = info_map.begin(); it != info_map.end(); ++it) {
const std::string& func_name = it->first;
auto& info = it->second;
if (FLAGS_jit_engine_type == "Executor") {
VLOG(3) << "Add function type: ExecutorFunction. name: "
<< info->FunctionName();
layer.SetFunction(
info->FunctionName(),
utils::MakeFunction<ExecutorFunction>(info, params_dict, place));
VLOG(3) << "Add function type: ExecutorEngine. Function name: "
<< func_name;
layer.SetEngine(
func_name,
utils::MakeEngine<ExecutorEngine>(info, params_dict, place));
} else if (FLAGS_jit_engine_type == "PE") {
VLOG(3) << "Add function type: PEFunction. name: "
<< info->FunctionName();
layer.SetFunction(
info->FunctionName(),
utils::MakeFunction<PEFunction>(info, params_dict, place));
VLOG(3) << "Add function type: PEEngine. Function name: " << func_name;
layer.SetEngine(func_name,
utils::MakeEngine<PEEngine>(info, params_dict, place));
} else {
PD_THROW("Invalid JitLayer funciton type.");
PD_THROW("Invalid JitLayer engine type.");
}
}
......
......@@ -1006,8 +1006,8 @@ PADDLE_DEFINE_EXPORTED_bool(
* default=PE
* Example:
* Note:
* FLAGS_jit_engine_type == Executor, using ExecutorFunction by default
* FLAGS_jit_engine_type == PE, using PEFunction by default
* FLAGS_jit_engine_type == Executor, using ExecutorEngine by default
* FLAGS_jit_engine_type == PE, using PEEngine by default
*/
PADDLE_DEFINE_EXPORTED_string(jit_engine_type,
"PE",
......
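
Since jit_engine_type is an exported gflags string (declared via DECLARE_string in serializer.cc above), the engine can presumably be switched before deserialization; a hedged sketch assuming standard gflags usage, not something shown in this diff:

#include "gflags/gflags.h"

DECLARE_string(jit_engine_type);

// Assumption: setting the flag before loading a Layer selects the engine;
// "PE" is the default, "Executor" selects ExecutorEngine.
void UseExecutorEngine() { FLAGS_jit_engine_type = "Executor"; }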
......@@ -372,8 +372,8 @@ static PyObject* eager_api_jit_function_call(PyObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
std::shared_ptr<jit::BaseFunction> function =
CastPyArg2BaseFunction(PyTuple_GET_ITEM(args, 0), 0);
std::shared_ptr<jit::BaseEngine> function =
CastPyArg2BaseEngine(PyTuple_GET_ITEM(args, 0), 0);
std::vector<paddle::experimental::Tensor> ins =
CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
std::vector<paddle::experimental::Tensor> outs = (*function)(ins);
......
......@@ -22,8 +22,8 @@ limitations under the License. */
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/operators/py_func_op.h"
#include "paddle/fluid/operators/utils.h"
......@@ -54,8 +54,8 @@ extern PyTypeObject* g_customplace_pytype;
extern PyTypeObject* g_framework_tensor_pytype;
extern PyTypeObject* g_framework_lodtensorarray_pytype;
extern PyTypeObject* g_custom_op_kernel_ctx_pytype;
extern PyTypeObject* g_executor_function_pytype;
extern PyTypeObject* g_pe_function_pytype;
extern PyTypeObject* g_executor_engine_pytype;
extern PyTypeObject* g_pe_engine_pytype;
int TensorDtype2NumpyDtype(phi::DataType dtype) {
switch (dtype) {
......@@ -232,19 +232,18 @@ std::shared_ptr<imperative::VarBase> CastPyArg2VarBase(PyObject* obj,
return py::cast<std::shared_ptr<imperative::VarBase>>(obj);
}
std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
ssize_t arg_pos) {
if (PyObject_IsInstance(
obj, reinterpret_cast<PyObject*>(g_executor_function_pytype))) {
return ::pybind11::handle(obj)
.cast<std::shared_ptr<jit::ExecutorFunction>>();
obj, reinterpret_cast<PyObject*>(g_executor_engine_pytype))) {
return ::pybind11::handle(obj).cast<std::shared_ptr<jit::ExecutorEngine>>();
} else if (PyObject_IsInstance(
obj, reinterpret_cast<PyObject*>(g_pe_function_pytype))) {
return ::pybind11::handle(obj).cast<std::shared_ptr<jit::PEFunction>>();
obj, reinterpret_cast<PyObject*>(g_pe_engine_pytype))) {
return ::pybind11::handle(obj).cast<std::shared_ptr<jit::PEEngine>>();
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"BaseFunction, but got %s",
"BaseEngine, but got %s",
arg_pos + 1,
reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
......
......@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
#include "paddle/fluid/eager/hooks.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
......@@ -75,7 +75,7 @@ framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
std::unordered_map<std::wstring, int> CastPyArg2Vocab(PyObject* obj,
ssize_t arg_pos);
std::vector<std::string> CastPyArg2Strings(PyObject* obj, ssize_t arg_pos);
std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
ssize_t arg_pos);
PyObject* ToPyObject(int value);
......
......@@ -18,8 +18,8 @@ limitations under the License. */
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/serializer.h"
......@@ -29,27 +29,26 @@ namespace py = pybind11;
namespace paddle {
namespace pybind {
PyTypeObject *g_executor_function_pytype = nullptr;
PyTypeObject *g_pe_function_pytype = nullptr;
PyTypeObject *g_executor_engine_pytype = nullptr;
PyTypeObject *g_pe_engine_pytype = nullptr;
using Variable = paddle::framework::Variable;
void BindJit(pybind11::module *m) {
py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
.def("function_dict",
&jit::Layer::FunctionMap,
&jit::Layer::EngineMap,
py::return_value_policy::reference);
py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>
executor_function(
*m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC");
g_executor_function_pytype =
reinterpret_cast<PyTypeObject *>(executor_function.ptr());
executor_function.def("info", &jit::ExecutorFunction::Info);
py::class_<jit::PEFunction, std::shared_ptr<jit::PEFunction>> pe_function(
*m, "PEFunction", R"DOC(PEFunction Class.)DOC");
g_pe_function_pytype = reinterpret_cast<PyTypeObject *>(pe_function.ptr());
pe_function.def("info", &jit::PEFunction::Info);
py::class_<jit::ExecutorEngine, std::shared_ptr<jit::ExecutorEngine>>
executor_engine(*m, "ExecutorEngine", R"DOC(ExecutorEngine Class.)DOC");
g_executor_engine_pytype =
reinterpret_cast<PyTypeObject *>(executor_engine.ptr());
executor_engine.def("info", &jit::ExecutorEngine::Info);
py::class_<jit::PEEngine, std::shared_ptr<jit::PEEngine>> pe_engine(
*m, "PEEngine", R"DOC(PEEngine Class.)DOC");
g_pe_engine_pytype = reinterpret_cast<PyTypeObject *>(pe_engine.ptr());
pe_engine.def("info", &jit::PEEngine::Info);
py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
*m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
......
......@@ -628,7 +628,7 @@ headers = (
# utils api headers
list(find_files('*.h', '@PADDLE_SOURCE_DIR@/paddle/utils', recursive=True))) # paddle utils headers
jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'base_function.h']
jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'function.h']
for f in jit_layer_headers:
headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=True))
......