Unverified commit 05d5bbfb, authored by Aurelius84, committed by GitHub

[JIT]Layer supports eager dygraph mode and Polish Function interface (#44283)

* [JIT]Layer supports eager dygraph mode and polish Function interface

* remove useless code

* fix #define
Parent d4699bd6
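Before the hunks: the net effect of this change is that BaseFunction and Layer now speak paddle::experimental::Tensor (eager dygraph) and phi::DenseTensor directly, instead of framework::Variable. A minimal sketch of what a C++ caller sees afterwards; how the Layer is loaded is not part of this diff (see the CpuLayerTest hunk below), and the layer.h include path is inferred from this commit's includes:

    // Sketch only: assumes a jit::Layer obtained elsewhere.
    // forward() now consumes and produces DenseTensors directly.
    #include <vector>
    #include "paddle/fluid/jit/layer.h"
    #include "paddle/phi/core/dense_tensor.h"

    std::vector<float> RunForward(paddle::jit::Layer &layer,
                                  const std::vector<phi::DenseTensor> &inputs) {
      // Overload resolution picks the DenseTensor version of forward().
      std::vector<phi::DenseTensor> outs = layer.forward(inputs);
      const float *data = outs[0].data<float>();
      return {data, data + outs[0].numel()};
    }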
@@ -14,23 +14,23 @@
 #pragma once

-#include <ostream>
-#include <string>
-
-#include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/common/place.h"

 namespace paddle {
 namespace jit {

-using Variable = paddle::framework::Variable;
+using Tensor = paddle::experimental::Tensor;
+using DenseTensor = phi::DenseTensor;

 class BaseFunction {
  public:
-  virtual std::vector<Variable> operator()(
-      const std::vector<Variable> &inputs) = 0;
+  virtual std::vector<DenseTensor> operator()(
+      const std::vector<DenseTensor> &inputs) = 0;
+  virtual std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) = 0;

   virtual ~BaseFunction() {}
-  // virtual void SetPalce(const phi::Place &place);
 };

 }  // namespace jit
...
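The interface above, a pure-virtual DenseTensor overload plus a Tensor overload, lets each concrete Function implement the heavy lifting once on DenseTensors and expose a thin eager-Tensor facade. A self-contained sketch of the shape of that delegation, using stand-in types rather than Paddle's:

    // Minimal, compilable model of the dual-overload pattern. "MyTensor" and
    // "MyDenseTensor" are toy stand-ins, not Paddle types.
    #include <iostream>
    #include <vector>

    struct MyDenseTensor { float value; };   // stand-in for phi::DenseTensor
    struct MyTensor { MyDenseTensor impl; }; // stand-in for paddle::experimental::Tensor

    class MyFunction {
     public:
      // Core path: all real work happens on dense tensors.
      std::vector<MyDenseTensor> operator()(const std::vector<MyDenseTensor>& inputs) {
        std::vector<MyDenseTensor> outs;
        for (auto& in : inputs) outs.push_back({in.value * 2});  // pretend "run program"
        return outs;
      }
      // Facade path: unwrap Tensors, run the dense path, wrap results back.
      std::vector<MyTensor> operator()(const std::vector<MyTensor>& inputs) {
        std::vector<MyDenseTensor> dense;
        for (auto& t : inputs) dense.push_back(t.impl);
        std::vector<MyTensor> outs;
        for (auto& d : (*this)(dense)) outs.push_back({d});
        return outs;
      }
    };

    int main() {
      MyFunction f;
      auto outs = f(std::vector<MyTensor>{MyTensor{{2.f}}});
      std::cout << outs[0].impl.value << "\n";  // prints 4
    }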
@@ -24,7 +24,7 @@ std::shared_ptr<BaseFunction> CompilationUnit::Function(
   PADDLE_ENFORCE_EQ(
       function_map_.count(name),
       1,
-      platform::errors::InvalidArgument(
+      phi::errors::InvalidArgument(
           "Funciton name %s is not exist in function_map_.", name));
   return function_map_.at(name);
 }
...
@@ -42,17 +42,21 @@ class ExecutorFunction : public BaseFunction {
   ~ExecutorFunction() noexcept {}

-  std::vector<Variable> operator()(const std::vector<Variable> &inputs) {
-    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
+    auto dense_tensors = utils::ToDenseTensors(inputs);
+    return utils::ToTensors(this->operator()(dense_tensors));
+  }
+
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
+    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
     inner_exe_.Run(info_->ProgramDesc(),
                    &scope_,
                    /*blockID=*/0,
                    false,
                    true,
                    info_->OutputArgNames());
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    std::vector<Variable> res;
-    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
+    std::vector<DenseTensor> res;
+    utils::FetchOuts(info_->OutputArgNames(), scope_, &res);
     return res;
   }
...
@@ -21,36 +21,50 @@
 namespace paddle {
 namespace jit {
 namespace utils {

-void FetchVarsByNames(const std::vector<std::string> &names,
-                      const framework::Scope &scope,
-                      std::vector<Variable> *outs) {
-  for (auto &out_name : names) {
+std::vector<DenseTensor> ToDenseTensors(const std::vector<Tensor> &tensors) {
+  std::vector<DenseTensor> ret;
+  for (auto &t : tensors) {
+    ret.emplace_back(*std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()));
+  }
+  return ret;
+}
+
+std::vector<Tensor> ToTensors(const std::vector<DenseTensor> &tensors) {
+  std::vector<Tensor> ret;
+  for (auto &t : tensors) {
+    ret.emplace_back(std::make_shared<DenseTensor>(t));
+  }
+  return ret;
+}
+
+void FetchOuts(const std::vector<std::string> &names,
+               const framework::Scope &scope,
+               std::vector<DenseTensor> *outs) {
+  outs->reserve(names.size());
+  for (size_t i = 0; i < names.size(); ++i) {
+    auto &out_name = names[i];
     VLOG(3) << "fetch out: " << out_name;
     auto *var = scope.FindVar(out_name);
     auto &src_tensor = var->Get<DenseTensor>();
-    Variable v;
-    auto *p = v.GetMutable<DenseTensor>();
-    *p = src_tensor;
-    outs->emplace_back(v);
+    outs->emplace_back(src_tensor);
   }
 }

-void ShareInputsIntoScope(const std::vector<std::string> &ordered_input_names,
-                          const std::vector<Variable> &vars,
-                          framework::Scope *scope) {
-  VLOG(3) << "vars size: " << vars.size();
+void ShareIntoScope(const std::vector<std::string> &ordered_input_names,
+                    const std::vector<DenseTensor> &tensors,
+                    framework::Scope *scope) {
+  VLOG(3) << "tensors size: " << tensors.size();
   PADDLE_ENFORCE_EQ(
-      vars.size(),
+      tensors.size(),
       ordered_input_names.size(),
       platform::errors::InvalidArgument(
-          "vars.size() should be equal to ordered_input_names.size()."));
-
-  for (size_t i = 0; i < vars.size(); i++) {
+          "tensors.size() should be equal to ordered_input_names.size()."));
+  for (size_t i = 0; i < tensors.size(); ++i) {
     VLOG(3) << "share into scope: " << ordered_input_names[i];
-    auto &dense_tensor = vars[i].Get<DenseTensor>();
     auto *var = scope->Var(ordered_input_names[i]);
     auto *dst_tensor = var->GetMutable<DenseTensor>();
-    *dst_tensor = dense_tensor;
+    *dst_tensor = tensors[i];
   }
 }
...
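A note on cost: FetchOuts and ShareIntoScope copy DenseTensor objects by value, but a DenseTensor copy shares the underlying allocation (the holder) rather than duplicating the data. A rough model of that aliasing with a shared_ptr-backed buffer; this is an illustrative assumption, not Paddle's actual implementation:

    // Why the by-value copies above are cheap: copying the handle aliases the
    // buffer. "FakeDenseTensor" is a toy model of a holder-backed tensor.
    #include <cassert>
    #include <memory>
    #include <vector>

    struct FakeDenseTensor {
      std::shared_ptr<std::vector<float>> holder;  // shared allocation
    };

    int main() {
      FakeDenseTensor src{std::make_shared<std::vector<float>>(8, 2.f)};
      FakeDenseTensor dst = src;        // what ShareIntoScope's assignment models
      (*dst.holder)[0] = 5.f;
      assert((*src.holder)[0] == 5.f);  // both views see the write
      return 0;
    }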
@@ -20,6 +20,7 @@
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/dense_tensor.h"

@@ -30,15 +31,20 @@ namespace jit {
 using Variable = paddle::framework::Variable;
 using Name2VariableMap = std::unordered_map<std::string, Variable>;
 using DenseTensor = phi::DenseTensor;
+using Tensor = paddle::experimental::Tensor;

 namespace utils {

-void FetchVarsByNames(const std::vector<std::string> &names,
-                      const framework::Scope &scope,
-                      std::vector<Variable> *outs);
+std::vector<DenseTensor> ToDenseTensors(const std::vector<Tensor> &tensors);
+std::vector<Tensor> ToTensors(const std::vector<DenseTensor> &tensors);

-void ShareInputsIntoScope(const std::vector<std::string> &ordered_input_names,
-                          const std::vector<Variable> &vars,
-                          framework::Scope *scope);
+void FetchOuts(const std::vector<std::string> &names,
+               const framework::Scope &scope,
+               std::vector<DenseTensor> *outs);
+
+void ShareIntoScope(const std::vector<std::string> &ordered_input_names,
+                    const std::vector<DenseTensor> &vars,
+                    framework::Scope *scope);

 void ShareParamsIntoScope(const std::vector<std::string> &param_names,
                           const Name2VariableMap &params_dict,
...
@@ -16,9 +16,6 @@
 namespace paddle {
 namespace jit {

-// TODO(dev): Make vector<string>, num_slot as in argument
-// Layer(const std::shared_ptr<ClassType>& type) : obj_(type, /*num_slot*/ 0U)
-// {}
 Layer::Layer(const std::vector<std::shared_ptr<FunctionInfo>>& infos,
              const Name2VariableMap& params_dict,
              const phi::Place& place)
@@ -30,7 +27,13 @@ std::shared_ptr<BaseFunction> Layer::Function(const std::string& name) const {
   return unit_.Function(name);
 }

-std::vector<Variable> Layer::forward(const std::vector<Variable>& inputs) {
+std::vector<Tensor> Layer::forward(const std::vector<Tensor>& inputs) {
+  auto func = Function("forward");
+  return (*func)(inputs);
+}
+
+std::vector<DenseTensor> Layer::forward(
+    const std::vector<DenseTensor>& inputs) {
   auto func = Function("forward");
   return (*func)(inputs);
 }
...
@@ -32,9 +32,6 @@ using Name2VariableMap = std::unordered_map<std::string, Variable>;
 class Layer {
  public:
-  // TODO(dev): Make vector<string>, num_slot as in argument
-  // Layer(const std::shared_ptr<ClassType>& type) : obj_(type, /*num_slot*/ 0U)
-  // {}
   Layer(const std::vector<std::shared_ptr<FunctionInfo>>& infos,
         const Name2VariableMap& params_dict,
         const phi::Place& place);
@@ -43,7 +40,9 @@ class Layer {
   Variable Attribute(const std::string& name) const;

-  std::vector<Variable> forward(const std::vector<Variable>& inputs);
+  std::vector<Tensor> forward(const std::vector<Tensor>& inputs);
+
+  std::vector<DenseTensor> forward(const std::vector<DenseTensor>& inputs);

   void to(const phi::Place& place);
@@ -55,7 +54,6 @@ class Layer {
   const Name2FunctionMap& FunctionMap() const;

  private:
-  // internal::Object obj_;
   Name2VariableMap params_dict_;
   Name2VariableMap attrs_dict_;
   CompilationUnit unit_;
...
@@ -52,17 +52,16 @@ namespace paddle {
 namespace jit {
 using DenseTensor = phi::DenseTensor;

-std::vector<Variable> PrepareInputs(const phi::Place& place) {
+std::vector<DenseTensor> PrepareInputs(const phi::Place& place) {
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   auto& dev_ctx = *pool.Get(place);

-  Variable v;
-  auto* dense_tensor = v.GetMutable<DenseTensor>();
-  dense_tensor->Resize(phi::make_ddim({2, 4}));
-  dense_tensor->mutable_data<float>(place);
-  phi::funcs::set_constant(dev_ctx, dense_tensor, 2.);
+  DenseTensor t;
+  t.Resize(phi::make_ddim({2, 4}));
+  t.mutable_data<float>(place);
+  phi::funcs::set_constant(dev_ctx, &t, 2.);

-  return {v};
+  return {t};
 }

 TEST(CpuLayerTest, Construct) {
@@ -72,16 +71,12 @@ TEST(CpuLayerTest, Construct) {
   auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
-  auto out_vars = outs[0];
-  auto out_dense_tensor = out_vars.Get<DenseTensor>();
-  auto out_data = out_dense_tensor.data<float>();
+  auto out_data = outs[0].data<float>();
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);

   auto func = layer.Function("infer");
   outs = (*func)(inputs);
-  out_vars = outs[0];
-  out_dense_tensor = out_vars.Get<DenseTensor>();
-  out_data = out_dense_tensor.data<float>();
+  out_data = outs[0].data<float>();
   EXPECT_NEAR(out_data[0], 1.41562390, 1e-6);
 }
@@ -98,8 +93,7 @@ TEST(GpuLayerTest, Construct) {
   auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
-  auto out_vars = outs[0];
-  auto out_dense_tensor = out_vars.Get<DenseTensor>();
+  auto out_dense_tensor = outs[0];
   phi::Copy(
       *dev_ctx_gpu, out_dense_tensor, phi::CPUPlace(), true, &cpu_dense_tensor);
   auto out_data = cpu_dense_tensor.data<float>();
@@ -107,8 +101,7 @@ TEST(GpuLayerTest, Construct) {
   auto func = layer.Function("infer");
   outs = (*func)(inputs);
-  out_vars = outs[0];
-  out_dense_tensor = out_vars.Get<DenseTensor>();
+  out_dense_tensor = outs[0];
   phi::Copy(
       *dev_ctx_gpu, out_dense_tensor, phi::CPUPlace(), true, &cpu_dense_tensor);
   out_data = cpu_dense_tensor.data<float>();
...
@@ -43,24 +43,29 @@ class PEFunction : public BaseFunction {
   ~PEFunction() noexcept {}

-  std::vector<Variable> operator()(const std::vector<Variable> &inputs) {
-    // bool is_test = true;
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
+    auto dense_tensors = utils::ToDenseTensors(inputs);
+    return utils::ToTensors(this->operator()(dense_tensors));
+  }
+
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
     std::string prog_string;
     std::hash<std::string> string_hash;

     auto &program_desc = info_->ProgramDesc();
+    // TODO(dev): Serialize is very slow.
     const_cast<framework::ProgramDesc *>(&program_desc)
         ->Proto()
         ->SerializePartialToString(&prog_string);
-    // program_desc.Proto()->SerializePartialToString(&prog_string);
     int64_t program_id = static_cast<int64_t>(string_hash(prog_string));

     const framework::BlockDesc &global_block = program_desc.Block(0);
     int64_t start_op_index = 0;
     int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

-    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
     std::vector<std::string> input_var_names = info_->InputArgNames();
     std::vector<std::string> output_var_names = info_->OutputArgNames();
-    std::vector<std::string> dout_var_names;

     if (end_op_index > start_op_index) {
       auto cache_info = framework::GetExecutorInfoFromCache(program_desc,
                                                             place_,
@@ -78,9 +83,7 @@ class PEFunction : public BaseFunction {
       skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
                                     output_var_names.begin(),
                                     output_var_names.end());
-      skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
-                                    dout_var_names.begin(),
-                                    dout_var_names.end());
       framework::details::ParseSafeEagerDeletionSkipVars(
           program_desc,
           end_op_index,
@@ -89,9 +92,8 @@ class PEFunction : public BaseFunction {
       }
       parallel_executor->RunWithoutFetch(skip_eager_delete_vars);
     }
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    std::vector<Variable> res;
-    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
+    std::vector<DenseTensor> res;
+    utils::FetchOuts(info_->OutputArgNames(), scope_, &res);
     return res;
   }
...
@@ -357,6 +357,19 @@ static std::vector<paddle::any> CastAttrsToTragetType(
   return res;
 }

+static PyObject* eager_api_jit_function_call(PyObject* self,
+                                             PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  std::shared_ptr<jit::BaseFunction> function =
+      CastPyArg2BaseFunction(PyTuple_GET_ITEM(args, 0), 0);
+  std::vector<paddle::experimental::Tensor> ins =
+      CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
+  std::vector<paddle::experimental::Tensor> outs = (*function)(ins);
+  return ToPyObject(outs);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* eager_api_run_costum_op(PyObject* self,
                                          PyObject* args,
                                          PyObject* kwargs) {
@@ -911,6 +924,10 @@ PyMethodDef variable_functions[] = {
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
+    {"jit_function_call",
+     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
+     METH_VARARGS | METH_KEYWORDS,
+     NULL},
     /**sparse functions**/
     {"sparse_coo_tensor",
      (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
...
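eager_api_jit_function_call follows the standard CPython extension pattern: a C function with the (self, args, kwargs) signature, registered under METH_VARARGS | METH_KEYWORDS in a PyMethodDef table. A minimal standalone model of that plumbing (a toy module, compiled against the Python development headers; demo_call simply echoes its first argument):

    #include <Python.h>

    static PyObject* demo_call(PyObject* self, PyObject* args, PyObject* kwargs) {
      if (PyTuple_Size(args) < 1) {
        PyErr_SetString(PyExc_TypeError, "expected at least one argument");
        return NULL;
      }
      PyObject* first = PyTuple_GET_ITEM(args, 0);  // borrowed reference
      Py_INCREF(first);                             // caller gets a new reference
      return first;                                 // echo the first argument back
    }

    static PyMethodDef demo_methods[] = {
        {"demo_call",
         (PyCFunction)(void (*)(void))demo_call,
         METH_VARARGS | METH_KEYWORDS,
         NULL},
        {NULL, NULL, 0, NULL}};

    static struct PyModuleDef demo_module = {
        PyModuleDef_HEAD_INIT, "demo", NULL, -1, demo_methods};

    PyMODINIT_FUNC PyInit_demo(void) { return PyModule_Create(&demo_module); }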
@@ -51,6 +51,7 @@ extern PyTypeObject* g_customplace_pytype;
 extern PyTypeObject* g_framework_tensor_pytype;
 extern PyTypeObject* g_framework_lodtensorarray_pytype;
 extern PyTypeObject* g_custom_op_kernel_ctx_pytype;
+extern PyTypeObject* g_executor_function_pytype;

 int TensorDtype2NumpyDtype(phi::DataType dtype) {
   switch (dtype) {
@@ -227,6 +228,21 @@ std::shared_ptr<imperative::VarBase> CastPyArg2VarBase(PyObject* obj,
   return py::cast<std::shared_ptr<imperative::VarBase>>(obj);
 }

+std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
+                                                          ssize_t arg_pos) {
+  if (PyObject_IsInstance(
+          obj, reinterpret_cast<PyObject*>(g_executor_function_pytype))) {
+    return ::pybind11::handle(obj)
+        .cast<std::shared_ptr<jit::ExecutorFunction>>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "BaseFunction, but got %s",
+        arg_pos + 1,
+        reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+}
+
 std::vector<paddle::experimental::Tensor> CastPyArg2VectorOfTensor(
     PyObject* obj, ssize_t arg_pos) {
   std::vector<paddle::experimental::Tensor> result;
...
@@ -19,6 +19,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/jit/executor_function.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
@@ -72,6 +73,8 @@ framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
 std::unordered_map<std::wstring, int> CastPyArg2Vocab(PyObject* obj,
                                                       ssize_t arg_pos);
 std::vector<std::string> CastPyArg2Strings(PyObject* obj, ssize_t arg_pos);
+std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
+                                                          ssize_t arg_pos);

 PyObject* ToPyObject(int value);
 PyObject* ToPyObject(uint32_t value);
...
@@ -28,39 +28,21 @@ namespace py = pybind11;
 namespace paddle {
 namespace pybind {

+PyTypeObject *g_executor_function_pytype = nullptr;
 using Variable = paddle::framework::Variable;

 void BindJit(pybind11::module *m) {
   py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
-      .def("function_dict", &jit::Layer::FunctionMap);
+      .def("function_dict",
+           &jit::Layer::FunctionMap,
+           py::return_value_policy::reference);

-  py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>(
-      *m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC")
-      .def("__call__",
-           [](jit::ExecutorFunction &self,
-              const std::vector<std::shared_ptr<imperative::VarBase>>
-                  &tensor_inputs) {
-             std::vector<Variable> var_inputs;
-             for (auto &tensor : tensor_inputs) {
-               var_inputs.emplace_back(tensor->Var());
-             }
-             auto var_outputs = self(var_inputs);
-
-             std::vector<std::shared_ptr<imperative::VarBase>> tensor_outputs;
-             auto output_names = self.Info()->OutputArgNames();
-             for (size_t i = 0; i < var_outputs.size(); ++i) {
-               auto var = var_outputs[i];
-               std::string name = output_names[i];
-               imperative::VariableWrapper var_wrapper(name, var);
-               auto shared_wrapper =
-                   std::make_shared<imperative::VariableWrapper>(var_wrapper);
-               auto shared_varbase =
-                   std::make_shared<imperative::VarBase>(shared_wrapper);
-               tensor_outputs.emplace_back(shared_varbase);
-             }
-             return tensor_outputs;
-           })
-      .def("info", &jit::ExecutorFunction::Info);
+  py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>
+      executor_function(
+          *m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC");
+  g_executor_function_pytype =
+      reinterpret_cast<PyTypeObject *>(executor_function.ptr());
+  executor_function.def("info", &jit::ExecutorFunction::Info);

   py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
       *m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
...
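The BindJit change uses a common pybind11 trick: capture the PyTypeObject* of a pybind11-registered class so that plain CPython code (here CastPyArg2BaseFunction, above) can type-check arguments with PyObject_IsInstance without pulling pybind11 into scope. A condensed sketch of the same pattern; "Widget" is a toy stand-in for jit::ExecutorFunction:

    #include <memory>
    #include <pybind11/pybind11.h>
    namespace py = pybind11;

    struct Widget {
      int info() const { return 42; }
    };

    PyTypeObject *g_widget_pytype = nullptr;  // consumed elsewhere, C-API side

    PYBIND11_MODULE(demo, m) {
      // Bind the class first, then stash its raw type object for later
      // PyObject_IsInstance checks in non-pybind11 code.
      py::class_<Widget, std::shared_ptr<Widget>> widget(m, "Widget");
      g_widget_pytype = reinterpret_cast<PyTypeObject *>(widget.ptr());
      widget.def(py::init<>()).def("info", &Widget::info);
    }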
@@ -22,7 +22,6 @@ from paddle.fluid.framework import _enable_legacy_dygraph
 from paddle.jit.layer import Layer
 from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator

-_enable_legacy_dygraph()
 paddle.seed(1)
...
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from paddle.fluid import core
 from paddle.fluid.core import Load

@@ -39,7 +40,7 @@ class Function():
         self.info = FunctionInfo(function.info())

     def __call__(self, *args):
-        return self.function(args)
+        return core.eager.jit_function_call(self.function, args)

 class FunctionInfo():
...