Unverified commit 2832ab22, authored by WangZhen, committed by GitHub

[JitLayer] Pybind Function and hide ExecutorEngine and PEEngine (#44984)

* Pybind Function and hide ExecutorEngine and PEEngine

* Remove FunctionNames in compilation_unit
Parent cd0b03cd
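In short: Python no longer receives engine objects (ExecutorEngine, PEEngine) from a loaded layer; it receives opaque jit::Function handles through the new function_names(), function(name), and function_info(name) bindings. A hedged sketch of the resulting surface, assuming a model previously saved with paddle.jit.save; the model path and the import location of the Python Layer wrapper (changed in the last hunk below) are assumptions, not part of this commit:

import paddle
from paddle.jit.layer import Layer  # the Python wrapper changed in the last hunk

layer = Layer()
layer.load('./example_model', paddle.CPUPlace())  # hypothetical saved model

names = layer.cpp_layer.function_names()        # list of saved function names
fn = layer.cpp_layer.function(names[0])         # opaque jit::Function handle
info = layer.cpp_layer.function_info(names[0])  # matching jit::FunctionInfo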
@@ -73,12 +73,17 @@ void ShareIntoScope(const std::vector<std::string> &ordered_input_names,
 void ShareParamsIntoScope(const std::vector<std::string> &param_names,
                           const Name2VariableMap &params_dict,
                           framework::Scope *scope) {
   VLOG(3) << "param_names size: " << param_names.size();
   for (size_t i = 0; i < param_names.size(); ++i) {
     std::string name = param_names[i];
+    PADDLE_ENFORCE_EQ(params_dict.count(name),
+                      1,
+                      phi::errors::InvalidArgument(
+                          "Parameter named %s does not exist in params_dict. "
+                          "Please check that your model was saved correctly.",
+                          name));
     auto &param = params_dict.find(name)->second;
     auto &dense_tensor = param->Get<DenseTensor>();
     VLOG(3) << "share into scope: " << name;
     auto *var = scope->Var(name);
     auto *dst_tensor = var->GetMutable<DenseTensor>();
     *dst_tensor = dense_tensor;
......
@@ -68,6 +68,14 @@ const std::shared_ptr<jit::FunctionInfo>& Layer::FunctionInfo(
   return info_map_.at(name);
 }
 
+std::vector<std::string> Layer::FunctionNames() const {
+  std::vector<std::string> names;
+  for (auto it = info_map_.begin(); it != info_map_.end(); ++it) {
+    names.emplace_back(it->first);
+  }
+  return names;
+}
+
 #define PD_SPECIALZE_ATTRIBUTE_TYPE(T)                    \
   template <>                                             \
   T Layer::Attribute<T>(const std::string& name) const {  \
......
@@ -70,6 +70,8 @@ class Layer {
   const std::shared_ptr<jit::FunctionInfo>& FunctionInfo(
       const std::string& name) const;
 
+  std::vector<std::string> FunctionNames() const;
+
  private:
   Name2VariableMap params_map_;
   Name2VariableMap attrs_map_;
......
@@ -372,8 +372,9 @@ static PyObject* eager_api_jit_function_call(PyObject* self,
                                              PyObject* args,
                                              PyObject* kwargs) {
   EAGER_TRY
-  std::shared_ptr<jit::BaseEngine> function =
-      CastPyArg2BaseEngine(PyTuple_GET_ITEM(args, 0), 0);
+  std::shared_ptr<jit::Function> function =
+      CastPyArg2JitFunction(PyTuple_GET_ITEM(args, 0), 0);
   std::vector<paddle::experimental::Tensor> ins =
       CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
   std::vector<paddle::experimental::Tensor> outs = (*function)(ins);
......
@@ -22,8 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/scope_guard.h"
-#include "paddle/fluid/jit/engine/executor_engine.h"
-#include "paddle/fluid/jit/engine/pe_engine.h"
+#include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/operators/py_func_op.h"
 #include "paddle/fluid/operators/utils.h"
@@ -54,8 +53,7 @@ extern PyTypeObject* g_customplace_pytype;
 extern PyTypeObject* g_framework_tensor_pytype;
 extern PyTypeObject* g_framework_lodtensorarray_pytype;
 extern PyTypeObject* g_custom_op_kernel_ctx_pytype;
-extern PyTypeObject* g_executor_engine_pytype;
-extern PyTypeObject* g_pe_engine_pytype;
+extern PyTypeObject* g_jit_function_pytype;
 
 int TensorDtype2NumpyDtype(phi::DataType dtype) {
   switch (dtype) {
@@ -232,14 +230,11 @@ std::shared_ptr<imperative::VarBase> CastPyArg2VarBase(PyObject* obj,
   return py::cast<std::shared_ptr<imperative::VarBase>>(obj);
 }
 
-std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
+std::shared_ptr<jit::Function> CastPyArg2JitFunction(PyObject* obj,
                                                       ssize_t arg_pos) {
-  if (PyObject_IsInstance(
-          obj, reinterpret_cast<PyObject*>(g_executor_engine_pytype))) {
-    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::ExecutorEngine>>();
-  } else if (PyObject_IsInstance(
-                 obj, reinterpret_cast<PyObject*>(g_pe_engine_pytype))) {
-    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::PEEngine>>();
+  if (PyObject_IsInstance(obj,
+                          reinterpret_cast<PyObject*>(g_jit_function_pytype))) {
+    return ::pybind11::handle(obj).cast<std::shared_ptr<jit::Function>>();
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "argument (position %d) must be "
......
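CastPyArg2JitFunction now accepts exactly one pybind type; anything else falls through to the PADDLE_THROW branch. A hedged illustration of that error path from Python, assuming the usual translation of PADDLE_THROW into a Python exception (the import path is also an assumption):

from paddle.framework import core  # import path is an assumption

try:
    # Passing a plain string instead of a bound jit::Function should trip
    # the InvalidArgument branch of CastPyArg2JitFunction shown above.
    core.eager.jit_function_call('not_a_function', ())
except Exception as err:
    print(err)  # surfaced from the PADDLE_THROW above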
@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/jit/engine/base_engine.h"
+#include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
@@ -75,7 +75,7 @@ framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
 std::unordered_map<std::wstring, int> CastPyArg2Vocab(PyObject* obj,
                                                       ssize_t arg_pos);
 std::vector<std::string> CastPyArg2Strings(PyObject* obj, ssize_t arg_pos);
-std::shared_ptr<jit::BaseEngine> CastPyArg2BaseEngine(PyObject* obj,
+std::shared_ptr<jit::Function> CastPyArg2JitFunction(PyObject* obj,
                                                      ssize_t arg_pos);
 
 PyObject* ToPyObject(int value);
......
@@ -18,8 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/platform/place.h"
 
-#include "paddle/fluid/jit/engine/executor_engine.h"
-#include "paddle/fluid/jit/engine/pe_engine.h"
+#include "paddle/fluid/jit/function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/serializer.h"
@@ -29,26 +28,18 @@ namespace py = pybind11;
 namespace paddle {
 namespace pybind {
 
-PyTypeObject *g_executor_engine_pytype = nullptr;
-PyTypeObject *g_pe_engine_pytype = nullptr;
+PyTypeObject *g_jit_function_pytype = nullptr;
 using Variable = paddle::framework::Variable;
 
 void BindJit(pybind11::module *m) {
   py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
-      .def("function_dict",
-           &jit::Layer::EngineMap,
-           py::return_value_policy::reference);
+      .def("function_names", &jit::Layer::FunctionNames)
+      .def("function", &jit::Layer::Function)
+      .def("function_info", &jit::Layer::FunctionInfo);
 
-  py::class_<jit::ExecutorEngine, std::shared_ptr<jit::ExecutorEngine>>
-      executor_engine(*m, "ExecutorEngine", R"DOC(ExecutorEngine Class.)DOC");
-  g_executor_engine_pytype =
-      reinterpret_cast<PyTypeObject *>(executor_engine.ptr());
-  executor_engine.def("info", &jit::ExecutorEngine::Info);
-
-  py::class_<jit::PEEngine, std::shared_ptr<jit::PEEngine>> pe_engine(
-      *m, "PEEngine", R"DOC(PEEngine Class.)DOC");
-  g_pe_engine_pytype = reinterpret_cast<PyTypeObject *>(pe_engine.ptr());
-  pe_engine.def("info", &jit::PEEngine::Info);
+  py::class_<jit::Function, std::shared_ptr<jit::Function>> function(
+      *m, "Function", R"DOC(Function Class.)DOC");
+  g_jit_function_pytype = reinterpret_cast<PyTypeObject *>(function.ptr());
 
   py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
       *m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
......
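Design note: jit::Function is registered with a std::shared_ptr holder, so Python shares ownership with the C++ side, and the class deliberately exposes no methods; invocation is routed exclusively through core.eager.jit_function_call. A quick hedged check, reusing layer and names from the sketch under the commit header:

fn = layer.cpp_layer.function(names[0])
print(type(fn).__name__)    # expected: 'Function' (the class bound in BindJit)
print(hasattr(fn, 'info'))  # expected: False; the old engines' info() is gone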
@@ -26,18 +26,19 @@ class Layer(object):
     def load(self, load_path, place):
         self.cpp_layer = Load(load_path, place)
-        function_dict = self.cpp_layer.function_dict()
-        for name, function in function_dict.items():
-            self.functions[name] = Function(function)
+        for name in self.cpp_layer.function_names():
+            function = self.cpp_layer.function(name)
+            info = self.cpp_layer.function_info(name)
+            self.functions[name] = Function(function, info)
             setattr(self, name, self.functions[name])
 
 
 class Function():
-    def __init__(self, function):
+    def __init__(self, function, info):
         self.function = function
-        self.info = FunctionInfo(function.info())
+        self.info = FunctionInfo(info)
 
     def __call__(self, *args):
         return core.eager.jit_function_call(self.function, args)
......
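Because load() also setattrs each wrapped Function onto the Python Layer, saved functions stay callable as plain attributes. A hedged end-to-end sketch; the model path, the function name 'forward', and the input shape are hypothetical:

import paddle
from paddle.jit.layer import Layer

layer = Layer()
layer.load('./example_model', paddle.CPUPlace())  # hypothetical saved model

x = paddle.rand([2, 4])
outs = layer.forward(x)  # Function.__call__ -> core.eager.jit_function_call
print(layer.functions)   # name -> wrapped Function for every saved function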