diff --git a/paddle/fluid/jit/base_function.h b/paddle/fluid/jit/base_function.h
index ebe4314a5319e2bf44ff52c91623f791f2ecf60b..df774d8fd84c701b789d403344945cc8bf1fa950 100644
--- a/paddle/fluid/jit/base_function.h
+++ b/paddle/fluid/jit/base_function.h
@@ -14,23 +14,23 @@

 #pragma once

-#include <string>
-#include <vector>
-
-#include "paddle/phi/common/place.h"
-
-#include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
+#include "paddle/phi/core/dense_tensor.h"

 namespace paddle {
 namespace jit {
-using Variable = paddle::framework::Variable;
+using Tensor = paddle::experimental::Tensor;
+using DenseTensor = phi::DenseTensor;
+
 class BaseFunction {
  public:
-  virtual std::vector<Variable> operator()(
-      const std::vector<Variable> &inputs) = 0;
+  virtual std::vector<DenseTensor> operator()(
+      const std::vector<DenseTensor> &inputs) = 0;
+
+  virtual std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) = 0;
+
   virtual ~BaseFunction() {}
-  // virtual void SetPalce(const phi::Place &place);
 };

 }  // namespace jit
diff --git a/paddle/fluid/jit/compilation_unit.cc b/paddle/fluid/jit/compilation_unit.cc
index d62c497d8b3387dd29f28865f134c952a01a9c98..60d42d045b0e3b7cc52587874a4787830d91d4f2 100644
--- a/paddle/fluid/jit/compilation_unit.cc
+++ b/paddle/fluid/jit/compilation_unit.cc
@@ -24,7 +24,7 @@ std::shared_ptr<BaseFunction> CompilationUnit::Function(
   PADDLE_ENFORCE_EQ(
       function_map_.count(name),
       1,
-      platform::errors::InvalidArgument(
+      phi::errors::InvalidArgument(
          "Funciton name %s is not exist in function_map_.", name));
   return function_map_.at(name);
 }
diff --git a/paddle/fluid/jit/executor_function.h b/paddle/fluid/jit/executor_function.h
index 36cb438e34cc22dcab8f2575532d1f008957049c..a9b9d59d21bf4bf44154733d792e067be13c2ad5 100644
--- a/paddle/fluid/jit/executor_function.h
+++ b/paddle/fluid/jit/executor_function.h
@@ -42,17 +42,21 @@ class ExecutorFunction : public BaseFunction {

   ~ExecutorFunction() noexcept {}

-  std::vector<Variable> operator()(const std::vector<Variable> &inputs) {
-    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
+    auto dense_tensors = utils::ToDenseTensors(inputs);
+    return utils::ToTensors(this->operator()(dense_tensors));
+  }
+
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
+    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
     inner_exe_.Run(info_->ProgramDesc(),
                    &scope_,
                    /*blockID=*/0,
                    false,
                    true,
                    info_->OutputArgNames());
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    std::vector<Variable> res;
-    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
+    std::vector<DenseTensor> res;
+    utils::FetchOuts(info_->OutputArgNames(), scope_, &res);
     return res;
   }
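Note: the `Tensor` overload added above is a thin adapter: it unwraps the inputs to `DenseTensor`s, delegates to the `DenseTensor` overload, and wraps the results back. A compilable, self-contained sketch of the pattern, with simplified stand-ins for Paddle's `Tensor`/`DenseTensor` and the `utils::To*` helpers (all names here are illustrative, not Paddle's actual classes):

```cpp
#include <memory>
#include <vector>

// Simplified stand-ins for paddle::experimental::Tensor (a handle holding a
// shared DenseTensor impl) and phi::DenseTensor.
struct DenseTensor {
  float value = 0.0f;
};
struct Tensor {
  std::shared_ptr<DenseTensor> impl;
};

// Mirrors utils::ToDenseTensors: unwrap each Tensor's impl by copy.
std::vector<DenseTensor> ToDenseTensors(const std::vector<Tensor>& ts) {
  std::vector<DenseTensor> ret;
  for (const auto& t : ts) ret.emplace_back(*t.impl);
  return ret;
}

// Mirrors utils::ToTensors: rewrap each DenseTensor in a fresh shared impl.
std::vector<Tensor> ToTensors(const std::vector<DenseTensor>& ts) {
  std::vector<Tensor> ret;
  for (const auto& t : ts) ret.push_back({std::make_shared<DenseTensor>(t)});
  return ret;
}

class BaseFunction {
 public:
  // The DenseTensor overload does the real work; subclasses supply it.
  virtual std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) = 0;

  // The Tensor overload just converts, delegates, and converts back.
  virtual std::vector<Tensor> operator()(const std::vector<Tensor>& inputs) {
    return ToTensors((*this)(ToDenseTensors(inputs)));
  }

  virtual ~BaseFunction() = default;
};

class EchoFunction : public BaseFunction {
 public:
  using BaseFunction::operator();  // keep the Tensor overload visible
  std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) override {
    return inputs;  // stands in for actually running a program
  }
};

int main() {
  EchoFunction f;
  std::vector<Tensor> ins = {{std::make_shared<DenseTensor>(DenseTensor{2.0f})}};
  return f(ins)[0].impl->value == 2.0f ? 0 : 1;
}
```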
diff --git a/paddle/fluid/jit/function_utils.cc b/paddle/fluid/jit/function_utils.cc
index 4757e784dfe75e30b29ad0dba89de4b3b3459053..a6da061de99dc61b5fb168aedabf62d56d861a18 100644
--- a/paddle/fluid/jit/function_utils.cc
+++ b/paddle/fluid/jit/function_utils.cc
@@ -21,36 +21,50 @@
 namespace paddle {
 namespace jit {
 namespace utils {
-void FetchVarsByNames(const std::vector<std::string> &names,
-                      const framework::Scope &scope,
-                      std::vector<Variable> *outs) {
-  for (auto &out_name : names) {
+
+std::vector<DenseTensor> ToDenseTensors(const std::vector<Tensor> &tensors) {
+  std::vector<DenseTensor> ret;
+  for (auto &t : tensors) {
+    ret.emplace_back(*std::dynamic_pointer_cast<DenseTensor>(t.impl()));
+  }
+  return ret;
+}
+
+std::vector<Tensor> ToTensors(const std::vector<DenseTensor> &tensors) {
+  std::vector<Tensor> ret;
+  for (auto &t : tensors) {
+    ret.emplace_back(std::make_shared<DenseTensor>(t));
+  }
+  return ret;
+}
+
+void FetchOuts(const std::vector<std::string> &names,
+               const framework::Scope &scope,
+               std::vector<DenseTensor> *outs) {
+  outs->reserve(names.size());
+  for (size_t i = 0; i < names.size(); ++i) {
+    auto &out_name = names[i];
     VLOG(3) << "fetch out: " << out_name;
     auto *var = scope.FindVar(out_name);
     auto &src_tensor = var->Get<DenseTensor>();
-    Variable v;
-    auto *p = v.GetMutable<DenseTensor>();
-    *p = src_tensor;
-    outs->emplace_back(v);
+    outs->emplace_back(src_tensor);
   }
 }

-void ShareInputsIntoScope(const std::vector<std::string> &ordered_input_names,
-                          const std::vector<Variable> &vars,
-                          framework::Scope *scope) {
-  VLOG(3) << "vars size: " << vars.size();
+void ShareIntoScope(const std::vector<std::string> &ordered_input_names,
+                    const std::vector<DenseTensor> &tensors,
+                    framework::Scope *scope) {
+  VLOG(3) << "tensors size: " << tensors.size();
   PADDLE_ENFORCE_EQ(
-      vars.size(),
+      tensors.size(),
       ordered_input_names.size(),
       platform::errors::InvalidArgument(
-          "vars.size() should be equal to ordered_input_names.size()."));
-
-  for (size_t i = 0; i < vars.size(); i++) {
+          "tensors.size() should be equal to ordered_input_names.size()."));
+  for (size_t i = 0; i < tensors.size(); ++i) {
     VLOG(3) << "share into scope: " << ordered_input_names[i];
-    auto &dense_tensor = vars[i].Get<DenseTensor>();
     auto *var = scope->Var(ordered_input_names[i]);
     auto *dst_tensor = var->GetMutable<DenseTensor>();
-    *dst_tensor = dense_tensor;
+    *dst_tensor = tensors[i];
   }
 }
diff --git a/paddle/fluid/jit/function_utils.h b/paddle/fluid/jit/function_utils.h
index 49db3f71fbdbffd35331b5e8f1f721defc51191e..ba1eaf7308be9192b1958a2998ed99cad1003b27 100644
--- a/paddle/fluid/jit/function_utils.h
+++ b/paddle/fluid/jit/function_utils.h
@@ -20,6 +20,7 @@

 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/dense_tensor.h"

@@ -30,15 +31,20 @@ namespace jit {
 using Variable = paddle::framework::Variable;
 using Name2VariableMap = std::unordered_map<std::string, Variable>;
 using DenseTensor = phi::DenseTensor;
+using Tensor = paddle::experimental::Tensor;
+
 namespace utils {

-void FetchVarsByNames(const std::vector<std::string> &names,
-                      const framework::Scope &scope,
-                      std::vector<Variable> *outs);
+std::vector<DenseTensor> ToDenseTensors(const std::vector<Tensor> &tensors);
+std::vector<Tensor> ToTensors(const std::vector<DenseTensor> &tensors);

-void ShareInputsIntoScope(const std::vector<std::string> &ordered_input_names,
-                          const std::vector<Variable> &vars,
-                          framework::Scope *scope);
+void FetchOuts(const std::vector<std::string> &names,
+               const framework::Scope &scope,
+               std::vector<DenseTensor> *outs);
+
+void ShareIntoScope(const std::vector<std::string> &ordered_input_names,
+                    const std::vector<DenseTensor> &vars,
+                    framework::Scope *scope);

 void ShareParamsIntoScope(const std::vector<std::string> &param_names,
                           const Name2VariableMap &params_dict,
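Note: `ShareIntoScope` and `FetchOuts` are the two halves of the executor handshake: inputs are written into scope variables keyed by the program's input argument names before the run, and outputs are read back by name afterwards. A compilable mock of that contract, with a plain map standing in for `framework::Scope` (types and names are illustrative only):

```cpp
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

using DenseTensor = std::vector<float>;  // stand-in for phi::DenseTensor
using Scope = std::unordered_map<std::string, DenseTensor>;  // stand-in scope

// Mirrors utils::ShareIntoScope: one scope variable per ordered input name.
void ShareIntoScope(const std::vector<std::string>& ordered_input_names,
                    const std::vector<DenseTensor>& tensors, Scope* scope) {
  assert(tensors.size() == ordered_input_names.size());  // the ENFORCE_EQ
  for (size_t i = 0; i < tensors.size(); ++i) {
    (*scope)[ordered_input_names[i]] = tensors[i];
  }
}

// Mirrors utils::FetchOuts: collect output tensors by name, in order.
void FetchOuts(const std::vector<std::string>& names, const Scope& scope,
               std::vector<DenseTensor>* outs) {
  outs->reserve(names.size());
  for (const auto& name : names) {
    outs->push_back(scope.at(name));
  }
}

int main() {
  Scope scope;
  ShareIntoScope({"x"}, {{1.0f, 2.0f}}, &scope);
  // ... an executor would run the program against `scope` here ...
  std::vector<DenseTensor> outs;
  FetchOuts({"x"}, scope, &outs);
  return outs[0].size() == 2 ? 0 : 1;
}
```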
diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc
index 6662abd17d2cf225ed42b51f8bcaa8edbfbb8321..f5985d71b03477d9ddf8b7772754db38e6036978 100644
--- a/paddle/fluid/jit/layer.cc
+++ b/paddle/fluid/jit/layer.cc
@@ -16,9 +16,6 @@
 namespace paddle {
 namespace jit {

-// TODO(dev): Make vector<Variable>, num_slot as in argument
-// Layer(const std::shared_ptr<ClassType>& type) : obj_(type, /*num_slot*/ 0U)
-// {}
 Layer::Layer(const std::vector<std::shared_ptr<FunctionInfo>>& infos,
              const Name2VariableMap& params_dict,
              const phi::Place& place)
@@ -30,7 +27,13 @@ std::shared_ptr<BaseFunction> Layer::Function(const std::string& name) const {
   return unit_.Function(name);
 }

-std::vector<Variable> Layer::forward(const std::vector<Variable>& inputs) {
+std::vector<Tensor> Layer::forward(const std::vector<Tensor>& inputs) {
+  auto func = Function("forward");
+  return (*func)(inputs);
+}
+
+std::vector<DenseTensor> Layer::forward(
+    const std::vector<DenseTensor>& inputs) {
   auto func = Function("forward");
   return (*func)(inputs);
 }
diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h
index 5c9f61b0d47b336d0affc0654121dc69b41352de..ee75881fc3156215a921e41c8a8d77bfa5556a07 100644
--- a/paddle/fluid/jit/layer.h
+++ b/paddle/fluid/jit/layer.h
@@ -32,9 +32,6 @@ using Name2VariableMap = std::unordered_map<std::string, Variable>;

 class Layer {
  public:
-  // TODO(dev): Make vector<Variable>, num_slot as in argument
-  // Layer(const std::shared_ptr<ClassType>& type) : obj_(type, /*num_slot*/ 0U)
-  // {}
   Layer(const std::vector<std::shared_ptr<FunctionInfo>>& infos,
         const Name2VariableMap& params_dict,
         const phi::Place& place);
@@ -43,7 +40,9 @@ class Layer {

   Variable Attribute(const std::string& name) const;

-  std::vector<Variable> forward(const std::vector<Variable>& inputs);
+  std::vector<Tensor> forward(const std::vector<Tensor>& inputs);
+
+  std::vector<DenseTensor> forward(const std::vector<DenseTensor>& inputs);

   void to(const phi::Place& place);

@@ -55,7 +54,6 @@ class Layer {
   const Name2FunctionMap& FunctionMap() const;

  private:
-  // internal::Object obj_;
   Name2VariableMap params_dict_;
   Name2VariableMap attrs_dict_;
   CompilationUnit unit_;
diff --git a/paddle/fluid/jit/layer_test.cc b/paddle/fluid/jit/layer_test.cc
index 6c9adff385abad95d586dba895ef7a04a89a1077..793afacb79dc7fc2e936d72bfaf988726c0f14f1 100644
--- a/paddle/fluid/jit/layer_test.cc
+++ b/paddle/fluid/jit/layer_test.cc
@@ -52,17 +52,16 @@ namespace paddle {
 namespace jit {
 using DenseTensor = phi::DenseTensor;

-std::vector<Variable> PrepareInputs(const phi::Place& place) {
+std::vector<DenseTensor> PrepareInputs(const phi::Place& place) {
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   auto& dev_ctx = *pool.Get(place);

-  Variable v;
-  auto* dense_tensor = v.GetMutable<DenseTensor>();
-  dense_tensor->Resize(phi::make_ddim({2, 4}));
-  dense_tensor->mutable_data<float>(place);
-  phi::funcs::set_constant(dev_ctx, dense_tensor, 2.);
+  DenseTensor t;
+  t.Resize(phi::make_ddim({2, 4}));
+  t.mutable_data<float>(place);
+  phi::funcs::set_constant(dev_ctx, &t, 2.);

-  return {v};
+  return {t};
 }

 TEST(CpuLayerTest, Construct) {
@@ -72,16 +71,12 @@ TEST(CpuLayerTest, Construct) {
   auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
-  auto out_vars = outs[0];
-  auto out_dense_tensor = out_vars.Get<DenseTensor>();
-  auto out_data = out_dense_tensor.data<float>();
+  auto out_data = outs[0].data<float>();
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);

   auto func = layer.Function("infer");
   outs = (*func)(inputs);
-  out_vars = outs[0];
-  out_dense_tensor = out_vars.Get<DenseTensor>();
-  out_data = out_dense_tensor.data<float>();
+  out_data = outs[0].data<float>();
   EXPECT_NEAR(out_data[0], 1.41562390, 1e-6);
 }

@@ -98,8 +93,7 @@ TEST(GpuLayerTest, Construct) {
   auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
-  auto out_vars = outs[0];
-  auto out_dense_tensor = out_vars.Get<DenseTensor>();
+  auto out_dense_tensor = outs[0];
   phi::Copy(
       *dev_ctx_gpu, out_dense_tensor, phi::CPUPlace(), true, &cpu_dense_tensor);
   auto out_data = cpu_dense_tensor.data<float>();
@@ -107,8 +101,7 @@ TEST(GpuLayerTest, Construct) {

   auto func = layer.Function("infer");
   outs = (*func)(inputs);
-  out_vars = outs[0];
-  out_dense_tensor = out_vars.Get<DenseTensor>();
+  out_dense_tensor = outs[0];
   phi::Copy(
       *dev_ctx_gpu, out_dense_tensor, phi::CPUPlace(), true, &cpu_dense_tensor);
   out_data = cpu_dense_tensor.data<float>();
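Note: both `Layer::forward` overloads stay thin wrappers: look up `forward` in the compilation unit, then call it, so the test above can drive either input type through the same path. A self-contained sketch of that lookup-then-call shape (the stand-in types are illustrative, not Paddle's):

```cpp
#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

using DenseTensor = std::vector<float>;  // stand-in for phi::DenseTensor
using FunctionT =
    std::function<std::vector<DenseTensor>(const std::vector<DenseTensor>&)>;

class CompilationUnit {
 public:
  // Mirrors CompilationUnit::Function: fail loudly on unknown names.
  std::shared_ptr<FunctionT> Function(const std::string& name) const {
    auto it = function_map_.find(name);
    if (it == function_map_.end()) {
      throw std::invalid_argument("Function name " + name +
                                  " does not exist in function_map_.");
    }
    return it->second;
  }
  std::unordered_map<std::string, std::shared_ptr<FunctionT>> function_map_;
};

class Layer {
 public:
  std::vector<DenseTensor> forward(const std::vector<DenseTensor>& inputs) {
    auto func = unit_.Function("forward");  // look up by name...
    return (*func)(inputs);                 // ...then call
  }
  CompilationUnit unit_;
};

int main() {
  Layer layer;
  layer.unit_.function_map_["forward"] = std::make_shared<FunctionT>(
      [](const std::vector<DenseTensor>& ins) { return ins; });
  return layer.forward({{1.0f}}).size() == 1 ? 0 : 1;
}
```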
diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h
index a77fd5935866018ca27ef664556be7dd65861a83..f174a0e996467d086df2a2baa2439e98bf0b2fee 100644
--- a/paddle/fluid/jit/pe_function.h
+++ b/paddle/fluid/jit/pe_function.h
@@ -43,24 +43,29 @@ class PEFunction : public BaseFunction {

   ~PEFunction() noexcept {}

-  std::vector<Variable> operator()(const std::vector<Variable> &inputs) {
-    // bool is_test = true;
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
+    auto dense_tensors = utils::ToDenseTensors(inputs);
+    return utils::ToTensors(this->operator()(dense_tensors));
+  }
+
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
     std::string prog_string;
     std::hash<std::string> string_hash;
     auto &program_desc = info_->ProgramDesc();
+    // TODO(dev): Serialize is very slow.
     const_cast<framework::ProgramDesc *>(&program_desc)
         ->Proto()
         ->SerializePartialToString(&prog_string);
-    // program_desc.Proto()->SerializePartialToString(&prog_string);
+
     int64_t program_id = static_cast<int64_t>(string_hash(prog_string));
     const framework::BlockDesc &global_block = program_desc.Block(0);
     int64_t start_op_index = 0;
     int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

-    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
     std::vector<std::string> input_var_names = info_->InputArgNames();
     std::vector<std::string> output_var_names = info_->OutputArgNames();
-    std::vector<std::string> dout_var_names;
+
     if (end_op_index > start_op_index) {
       auto cache_info = framework::GetExecutorInfoFromCache(program_desc,
                                                             place_,
@@ -78,9 +83,7 @@ class PEFunction : public BaseFunction {
         skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
                                       output_var_names.begin(),
                                       output_var_names.end());
-        skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
-                                      dout_var_names.begin(),
-                                      dout_var_names.end());
+
         framework::details::ParseSafeEagerDeletionSkipVars(
             program_desc,
             end_op_index,
@@ -89,9 +92,8 @@ class PEFunction : public BaseFunction {
       }
       parallel_executor->RunWithoutFetch(skip_eager_delete_vars);
     }
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    std::vector<Variable> res;
-    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
+    std::vector<DenseTensor> res;
+    utils::FetchOuts(info_->OutputArgNames(), scope_, &res);
     return res;
   }
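Note: `PEFunction` keys its executor cache off a hash of the serialized `ProgramDesc` (the new TODO flags serialization as the slow step). A self-contained sketch of that hash-keyed memoization; the `Executor` type and the cache helper are invented for illustration:

```cpp
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

struct Executor {};  // stand-in for the cached ParallelExecutor state

// Hypothetical cache keyed by the hash of a serialized program, mirroring
// how pe_function.h turns prog_string into program_id.
std::shared_ptr<Executor> GetExecutorFromCache(const std::string& prog_string) {
  static std::unordered_map<int64_t, std::shared_ptr<Executor>> cache;
  std::hash<std::string> string_hash;
  int64_t program_id = static_cast<int64_t>(string_hash(prog_string));
  auto it = cache.find(program_id);
  if (it != cache.end()) return it->second;  // hit: reuse the prior executor
  auto exe = std::make_shared<Executor>();   // miss: build and remember
  cache.emplace(program_id, exe);
  return exe;
}

int main() {
  auto a = GetExecutorFromCache("program-A");
  auto b = GetExecutorFromCache("program-A");
  return a == b ? 0 : 1;  // same serialized program -> same cached executor
}
```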
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index f256787805a0f49303b995559442ec99ea582b07..3fe2cb170d7963eb7907367a2933f64069032320 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -357,6 +357,19 @@ static std::vector<paddle::any> CastAttrsToTragetType(
   return res;
 }

+static PyObject* eager_api_jit_function_call(PyObject* self,
+                                             PyObject* args,
+                                             PyObject* kwargs) {
+  EAGER_TRY
+  std::shared_ptr<jit::BaseFunction> function =
+      CastPyArg2BaseFunction(PyTuple_GET_ITEM(args, 0), 0);
+  std::vector<paddle::experimental::Tensor> ins =
+      CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
+  std::vector<paddle::experimental::Tensor> outs = (*function)(ins);
+  return ToPyObject(outs);
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* eager_api_run_costum_op(PyObject* self,
                                          PyObject* args,
                                          PyObject* kwargs) {
@@ -911,6 +924,10 @@ PyMethodDef variable_functions[] = {
      (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
      METH_VARARGS | METH_KEYWORDS,
      NULL},
+    {"jit_function_call",
+     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
+     METH_VARARGS | METH_KEYWORDS,
+     NULL},
     /**sparse functions**/
     {"sparse_coo_tensor",
      (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 9e8065a6a438a6e61ae25e79d8149bf8708105c5..185b81677125d78af9bcb4679feef9dbfd6e7730 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -51,6 +51,7 @@ extern PyTypeObject* g_customplace_pytype;
 extern PyTypeObject* g_framework_tensor_pytype;
 extern PyTypeObject* g_framework_lodtensorarray_pytype;
 extern PyTypeObject* g_custom_op_kernel_ctx_pytype;
+extern PyTypeObject* g_executor_function_pytype;

 int TensorDtype2NumpyDtype(phi::DataType dtype) {
   switch (dtype) {
@@ -227,6 +228,21 @@ std::shared_ptr<imperative::VarBase> CastPyArg2VarBase(PyObject* obj,
   return py::cast<std::shared_ptr<imperative::VarBase>>(obj);
 }

+std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
+                                                          ssize_t arg_pos) {
+  if (PyObject_IsInstance(
+          obj, reinterpret_cast<PyObject*>(g_executor_function_pytype))) {
+    return ::pybind11::handle(obj)
+        .cast<std::shared_ptr<jit::ExecutorFunction>>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "BaseFunction, but got %s",
+        arg_pos + 1,
+        reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+}
+
 std::vector<paddle::experimental::Tensor> CastPyArg2VectorOfTensor(
     PyObject* obj, ssize_t arg_pos) {
   std::vector<paddle::experimental::Tensor> result;
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 25dcd91bed0d147ab8601afadf027a24375ef427..b97dcb9cddbecf6330017e3754e03f9594ec5395 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -19,6 +19,7 @@ typedef SSIZE_T ssize_t;

 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/jit/executor_function.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
@@ -72,6 +73,8 @@ framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
 std::unordered_map<std::wstring, int> CastPyArg2Vocab(PyObject* obj,
                                                       ssize_t arg_pos);
 std::vector<std::string> CastPyArg2Strings(PyObject* obj, ssize_t arg_pos);
+std::shared_ptr<jit::BaseFunction> CastPyArg2BaseFunction(PyObject* obj,
+                                                          ssize_t arg_pos);

 PyObject* ToPyObject(int value);
 PyObject* ToPyObject(uint32_t value);
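Note: `CastPyArg2BaseFunction` can type-check a raw `PyObject*` only because `BindJit` stashes the `PyTypeObject*` that pybind11 creates for `ExecutorFunction` (see the `jit.cc` hunk below). A minimal sketch of that capture-and-check pattern; the module and class names here are illustrative, and this assumes pybind11 is available:

```cpp
#include <Python.h>

#include <memory>
#include <stdexcept>

#include <pybind11/pybind11.h>

namespace py = pybind11;

struct ExecutorFunction {
  int info() const { return 0; }
};

// Captured once at binding time; raw CPython code can use it later.
PyTypeObject* g_executor_function_pytype = nullptr;

std::shared_ptr<ExecutorFunction> CastPyArg2ExecutorFunction(PyObject* obj) {
  if (PyObject_IsInstance(
          obj, reinterpret_cast<PyObject*>(g_executor_function_pytype))) {
    // Hand the object back to pybind11 for the typed cast.
    return py::handle(obj).cast<std::shared_ptr<ExecutorFunction>>();
  }
  throw std::invalid_argument("expected an ExecutorFunction instance");
}

PYBIND11_MODULE(demo, m) {
  py::class_<ExecutorFunction, std::shared_ptr<ExecutorFunction>> cls(
      m, "ExecutorFunction");
  cls.def(py::init<>());
  cls.def("info", &ExecutorFunction::info);
  // cls.ptr() is the PyObject* of the generated heap type; keep its type.
  g_executor_function_pytype = reinterpret_cast<PyTypeObject*>(cls.ptr());
}
```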
diff --git a/paddle/fluid/pybind/jit.cc b/paddle/fluid/pybind/jit.cc
index 07b79742f002ec93899f6ca90495b25afbdbde6c..be2ad50400c7704c3a48fbb1f7aea6eb2f0d60c7 100644
--- a/paddle/fluid/pybind/jit.cc
+++ b/paddle/fluid/pybind/jit.cc
@@ -28,39 +28,21 @@ namespace py = pybind11;
 namespace paddle {
 namespace pybind {
+PyTypeObject *g_executor_function_pytype = nullptr;
 using Variable = paddle::framework::Variable;

 void BindJit(pybind11::module *m) {
   py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
-      .def("function_dict", &jit::Layer::FunctionMap);
-
-  py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>(
-      *m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC")
-      .def("__call__",
-           [](jit::ExecutorFunction &self,
-              const std::vector<std::shared_ptr<imperative::VarBase>>
-                  &tensor_inputs) {
-             std::vector<Variable> var_inputs;
-             for (auto &tensor : tensor_inputs) {
-               var_inputs.emplace_back(tensor->Var());
-             }
-             auto var_outputs = self(var_inputs);
-
-             std::vector<std::shared_ptr<imperative::VarBase>> tensor_outputs;
-             auto output_names = self.Info()->OutputArgNames();
-             for (size_t i = 0; i < var_outputs.size(); ++i) {
-               auto var = var_outputs[i];
-               std::string name = output_names[i];
-               imperative::VariableWrapper var_wrapper(name, var);
-               auto shared_wrapper =
-                   std::make_shared<imperative::VariableWrapper>(var_wrapper);
-               auto shared_varbase =
-                   std::make_shared<imperative::VarBase>(shared_wrapper);
-               tensor_outputs.emplace_back(shared_varbase);
-             }
-             return tensor_outputs;
-           })
-      .def("info", &jit::ExecutorFunction::Info);
+      .def("function_dict",
+           &jit::Layer::FunctionMap,
+           py::return_value_policy::reference);
+
+  py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>
+      executor_function(
+          *m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC");
+  g_executor_function_pytype =
+      reinterpret_cast<PyTypeObject *>(executor_function.ptr());
+  executor_function.def("info", &jit::ExecutorFunction::Info);

   py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
       *m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
diff --git a/python/paddle/fluid/tests/unittests/test_jit_layer.py b/python/paddle/fluid/tests/unittests/test_jit_layer.py
index 24c0131fd7012639a96d18812fc0a89b4b60a569..fd77aa599889ff5ef53e8d9f4844dc5a727fcac6 100644
--- a/python/paddle/fluid/tests/unittests/test_jit_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_jit_layer.py
@@ -22,7 +22,6 @@ from paddle.fluid.framework import _enable_legacy_dygraph
 from paddle.jit.layer import Layer
 from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator

-_enable_legacy_dygraph()
 paddle.seed(1)
diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py
index 8ee3652dca8438e9c48acbc78094f38f9bb98f38..4aee7a8f5c02a3e478c45bd988c10777018d11cd 100644
--- a/python/paddle/jit/layer.py
+++ b/python/paddle/jit/layer.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from paddle.fluid import core
 from paddle.fluid.core import Load


@@ -39,7 +40,7 @@ class Function():
         self.info = FunctionInfo(function.info())

     def __call__(self, *args):
-        return self.function(args)
+        return core.eager.jit_function_call(self.function, args)


 class FunctionInfo():
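Note: end to end, `Function.__call__` now goes `core.eager.jit_function_call` -> `eager_api_jit_function_call` -> `BaseFunction::operator()` on `Tensor`s, replacing the old pybind `__call__` lambda. The C side is exposed through an ordinary CPython `PyMethodDef` table; a self-contained miniature of that registration (the module name and echo behavior are invented for the example):

```cpp
// Miniature of how eager_functions.cc exposes a C entry point to Python.
// Build as a Python extension module; names here are illustrative.
#include <Python.h>

static PyObject* jit_function_call(PyObject* self, PyObject* args,
                                   PyObject* kwargs) {
  PyObject* func = nullptr;
  PyObject* ins = nullptr;
  // Two positional arguments, like PyTuple_GET_ITEM(args, 0) / (args, 1).
  if (!PyArg_ParseTuple(args, "OO", &func, &ins)) return nullptr;
  // A real implementation would cast `func`, run it on `ins`, and convert
  // the outputs back to Python objects; this stub just echoes the inputs.
  Py_INCREF(ins);
  return ins;
}

static PyMethodDef methods[] = {
    {"jit_function_call",
     (PyCFunction)(void (*)(void))jit_function_call,
     METH_VARARGS | METH_KEYWORDS,
     nullptr},
    {nullptr, nullptr, 0, nullptr}};

static struct PyModuleDef module_def = {
    PyModuleDef_HEAD_INIT, "eager_demo", nullptr, -1, methods};

PyMODINIT_FUNC PyInit_eager_demo(void) { return PyModule_Create(&module_def); }
```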