Unverified commit c9ec9508, authored by WangZhen, committed by GitHub

[JitLayer]Clean some include and polish load program code (#43798)

* Polish Layer code

* Refine some API names

* Polish load program code

* Clean some includes

* Process Windows-style paths

* Move some functions to utils
Parent: ff7da117
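For orientation before the diff: the reworked entry point now takes an explicit place and a 'dirname/file_prefix' path instead of pulling the place from the imperative tracer. A minimal usage sketch, mirroring the updated layer_test.cc (the ./multi_program_load directory comes from the test-data tarball and is an assumption here):

#include <vector>

#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/serializer.h"

// Sketch only: load a saved Layer onto an explicit place and run it.
void RunForward(const std::vector<paddle::framework::Variable>& inputs) {
  auto place = phi::CPUPlace();
  // The path is 'dirname/file_prefix'; the loader scans the directory for
  // <file_prefix>.<func_name>.pdmodel files and reads the parameters from
  // <file_prefix>.pdiparams.
  auto layer = paddle::jit::Load("./multi_program_load/export", place);
  auto outs = layer.forward(inputs);     // runs the "forward" function
  auto infer = layer.Function("infer");  // or fetch another function by name
  outs = (*infer)(inputs);
}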
--- a/paddle/fluid/jit/CMakeLists.txt
+++ b/paddle/fluid/jit/CMakeLists.txt
@@ -4,32 +4,38 @@ cc_library(
   DEPS lod_tensor device_context)
 cc_library(
-  jit_layer_utils
-  SRCS layer_utils.cc
-  DEPS scope proto_desc)
+  jit_function_utils
+  SRCS function_utils.cc
+  DEPS lod_tensor scope proto_desc)
+cc_library(
+  jit_serializer_utils
+  SRCS serializer_utils.cc
+  DEPS proto_desc)
 cc_library(
   jit_compilation_unit
   SRCS compilation_unit.cc
   DEPS proto_desc executor parallel_executor executor_cache)
+cc_library(
+  jit_function_schema
+  SRCS function_schema.cc
+  DEPS jit_function_utils)
 cc_library(
   jit_layer
   SRCS layer.cc
   DEPS jit_compilation_unit)
-cc_library(
-  jit_function_schema
-  SRCS function_schema.cc
-  DEPS jit_layer_utils)
 if(WITH_TESTING
    AND NOT WIN32
    AND NOT "$ENV{CI_SKIP_CPP_TEST}" STREQUAL "ON")
   add_custom_target(
     jit_download_program
-    COMMAND wget -nc https://paddle-ci.gz.bcebos.com/dy2st/Testing.tar.gz
-    COMMAND tar zxvf Testing.tar.gz)
+    COMMAND wget -nc
+            https://paddle-ci.gz.bcebos.com/dy2st/multi_program_load.tar.gz
+    COMMAND tar zxvf multi_program_load.tar.gz)
   set(JIT_DEPS
       phi
       elementwise_add_op
@@ -41,9 +47,10 @@ if(WITH_TESTING
       scale_op
       jit_serializer
       jit_layer
-      jit_layer_utils
+      jit_function_utils
       jit_function_schema
-      jit_compilation_unit)
+      jit_compilation_unit
+      jit_serializer_utils)
   cc_test(
     layer_test
     SRCS layer_test.cc
--- a/paddle/fluid/jit/compilation_unit.cc
+++ b/paddle/fluid/jit/compilation_unit.cc
@@ -14,30 +14,25 @@
 #include "paddle/fluid/jit/compilation_unit.h"

+#include "paddle/phi/core/enforce.h"
+
 namespace paddle {
 namespace jit {

-void CompilationUnit::AddExecutorFunction(
-    const std::string &func_name,
-    const std::shared_ptr<FunctionInfo> &info,
-    const Name2VariableMap &params_dict,
-    const phi::Place &place) {
-  function_dict_[func_name] =
-      std::make_shared<ExecutorFunction>(info, params_dict, place);
-}
-
-void CompilationUnit::AddPEFunction(const std::string &func_name,
-                                    const std::shared_ptr<FunctionInfo> &info,
-                                    const Name2VariableMap &params_dict,
-                                    const phi::Place &place) {
-  function_dict_[func_name] =
-      std::make_shared<PEFunction>(info, params_dict, place);
-}
-
-std::shared_ptr<BaseFunction> CompilationUnit::GetFunction(
+std::shared_ptr<BaseFunction> CompilationUnit::Function(
     const std::string &name) const {
+  PADDLE_ENFORCE_EQ(
+      function_dict_.count(name),
+      1,
+      platform::errors::InvalidArgument(
+          "Function name %s does not exist in function_dict_.", name));
   return function_dict_.at(name);
 }

+void CompilationUnit::SetFunction(
+    const std::string &name, const std::shared_ptr<BaseFunction> &function) {
+  function_dict_[name] = function;
+}
+
 }  // namespace jit
 }  // namespace paddle
--- a/paddle/fluid/jit/compilation_unit.h
+++ b/paddle/fluid/jit/compilation_unit.h
@@ -17,9 +17,7 @@
 #include <string>
 #include <unordered_map>

-#include "paddle/fluid/jit/executor_function.h"
-#include "paddle/fluid/jit/function_schema.h"
-#include "paddle/fluid/jit/pe_function.h"
+#include "paddle/fluid/jit/base_function.h"

 namespace paddle {
 namespace jit {
@@ -29,17 +27,10 @@ class CompilationUnit {
   CompilationUnit() = default;
   ~CompilationUnit() {}

-  void AddExecutorFunction(const std::string &func_name,
-                           const std::shared_ptr<FunctionInfo> &info,
-                           const Name2VariableMap &params_dict,
-                           const phi::Place &place);
+  std::shared_ptr<BaseFunction> Function(const std::string &name) const;

-  void AddPEFunction(const std::string &func_name,
-                     const std::shared_ptr<FunctionInfo> &info,
-                     const Name2VariableMap &params_dict,
-                     const phi::Place &place);
-
-  std::shared_ptr<BaseFunction> GetFunction(const std::string &name) const;
+  void SetFunction(const std::string &name,
+                   const std::shared_ptr<BaseFunction> &function);

  private:
   std::unordered_map<std::string, std::shared_ptr<BaseFunction>> function_dict_;
--- a/paddle/fluid/jit/executor_function.h
+++ b/paddle/fluid/jit/executor_function.h
@@ -25,7 +25,7 @@
 #include "paddle/fluid/jit/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
-#include "paddle/fluid/jit/layer_utils.h"
+#include "paddle/fluid/jit/function_utils.h"

 namespace paddle {
 namespace jit {
@@ -36,23 +36,23 @@ class ExecutorFunction : public BaseFunction {
                    const Name2VariableMap &params_dict,
                    const phi::Place &place)
       : info_(info), place_(place), inner_exe_(place_) {
-    ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_);
+    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
     VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
   }

   ~ExecutorFunction() noexcept {}

   std::vector<Variable> operator()(const std::vector<Variable> &inputs) {
-    ShareInputsIntoScope(info_->GetInputArgNames(), inputs, &scope_);
-    inner_exe_.Run(info_->GetProgramDesc(),
+    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+    inner_exe_.Run(info_->ProgramDesc(),
                    &scope_,
                    /*blockID=*/0,
                    false,
                    true,
-                   info_->GetOutputArgNames());
+                   info_->OutputArgNames());
     VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
     std::vector<Variable> res;
-    FetchVarsByNames(info_->GetOutputArgNames(), scope_, &res);
+    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
     return res;
   }
--- a/paddle/fluid/jit/function_schema.cc
+++ b/paddle/fluid/jit/function_schema.cc
@@ -14,6 +14,9 @@
 #include "paddle/fluid/jit/function_schema.h"

+#include "paddle/phi/core/enforce.h"
+
+#include "paddle/fluid/jit/function_utils.h"

 namespace paddle {
 namespace jit {
@@ -22,7 +25,7 @@ Argument::Argument(const std::string& name, bool is_out)
 const std::string& Argument::Name() const { return name_; }

-const std::vector<std::string> FunctionSchema::GetInputArgNames() const {
+const std::vector<std::string> FunctionSchema::InputArgNames() const {
   std::vector<std::string> input_arg_names;
   for (auto& arg : input_args) {
     input_arg_names.emplace_back(arg.Name());
@@ -30,7 +33,7 @@ const std::vector<std::string> FunctionSchema::GetInputArgNames() const {
   return input_arg_names;
 }

-const std::vector<std::string> FunctionSchema::GetOutputArgNames() const {
+const std::vector<std::string> FunctionSchema::OutputArgNames() const {
   std::vector<std::string> output_arg_names;
   for (auto& arg : output_args) {
     output_arg_names.emplace_back(arg.Name());
@@ -60,25 +63,25 @@ FunctionInfo::FunctionInfo(const std::string& func_name,
     schema_.AddOutputArg(out_name);
   }
   // remove feed fetch op
-  RemoveFeedFetch(&program_desc_);
+  utils::RemoveFeedFetch(&program_desc_);
 }

-const std::string& FunctionInfo::GetFunctionName() const { return func_name_; }
+const std::string& FunctionInfo::FunctionName() const { return func_name_; }

-const framework::ProgramDesc& FunctionInfo::GetProgramDesc() const {
+const framework::ProgramDesc& FunctionInfo::ProgramDesc() const {
   return program_desc_;
 }

-const std::vector<std::string>& FunctionInfo::GetParamNames() const {
+const std::vector<std::string>& FunctionInfo::ParamNames() const {
   return param_names_;
 }

-const std::vector<std::string> FunctionInfo::GetInputArgNames() const {
-  return schema_.GetInputArgNames();
+const std::vector<std::string> FunctionInfo::InputArgNames() const {
+  return schema_.InputArgNames();
 }

-const std::vector<std::string> FunctionInfo::GetOutputArgNames() const {
-  return schema_.GetOutputArgNames();
+const std::vector<std::string> FunctionInfo::OutputArgNames() const {
+  return schema_.OutputArgNames();
 }

 }  // namespace jit
--- a/paddle/fluid/jit/function_schema.h
+++ b/paddle/fluid/jit/function_schema.h
@@ -14,17 +14,15 @@
 #pragma once

-#include <ostream>
 #include <string>
 #include <vector>

 #include "paddle/fluid/framework/program_desc.h"
-#include "paddle/phi/core/enforce.h"
-
-#include "paddle/fluid/jit/layer_utils.h"
+#include "paddle/fluid/framework/variable.h"

 namespace paddle {
 namespace jit {
+using Variable = paddle::framework::Variable;

 class Argument {
  public:
@@ -42,9 +40,9 @@ class FunctionSchema {
  public:
   FunctionSchema() = default;

-  const std::vector<std::string> GetInputArgNames() const;
+  const std::vector<std::string> InputArgNames() const;

-  const std::vector<std::string> GetOutputArgNames() const;
+  const std::vector<std::string> OutputArgNames() const;

   void AddInputArg(const std::string& name);
@@ -62,15 +60,15 @@ class FunctionInfo {
                const std::vector<std::string>& param_names,
                const framework::ProgramDesc& program_desc);

-  const std::string& GetFunctionName() const;
+  const std::string& FunctionName() const;

-  const framework::ProgramDesc& GetProgramDesc() const;
+  const framework::ProgramDesc& ProgramDesc() const;

-  const std::vector<std::string>& GetParamNames() const;
+  const std::vector<std::string>& ParamNames() const;

-  const std::vector<std::string> GetInputArgNames() const;
+  const std::vector<std::string> InputArgNames() const;

-  const std::vector<std::string> GetOutputArgNames() const;
+  const std::vector<std::string> OutputArgNames() const;

  private:
   std::string func_name_;
--- a/paddle/fluid/jit/layer_utils.cc
+++ b/paddle/fluid/jit/function_utils.cc
@@ -12,11 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle/fluid/jit/layer_utils.h"
+#include "paddle/fluid/jit/function_utils.h"
+
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/framework/var_desc.h"
+#include "paddle/phi/core/enforce.h"

 namespace paddle {
 namespace jit {
+namespace utils {

 void FetchVarsByNames(const std::vector<std::string> &names,
                       const framework::Scope &scope,
                       std::vector<Variable> *outs) {
@@ -88,5 +92,6 @@ void RemoveFeedFetch(framework::ProgramDesc *program_desc) {
   }
 }

+}  // namespace utils
 }  // namespace jit
 }  // namespace paddle
--- a/paddle/fluid/jit/layer_utils.h
+++ b/paddle/fluid/jit/function_utils.h
@@ -18,19 +18,19 @@
 #include <unordered_map>
 #include <vector>

-#include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/framework/var_desc.h"
 #include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/common/place.h"
 #include "paddle/phi/core/dense_tensor.h"
-#include "paddle/phi/core/enforce.h"
+
+#include "paddle/fluid/jit/function_schema.h"

 namespace paddle {
 namespace jit {
 using Variable = paddle::framework::Variable;
 using Name2VariableMap = std::unordered_map<std::string, Variable>;
 using DenseTensor = phi::DenseTensor;

+namespace utils {

 void FetchVarsByNames(const std::vector<std::string> &names,
                       const framework::Scope &scope,
@@ -46,5 +46,13 @@ void ShareParamsIntoScope(const std::vector<std::string> &param_names,

 void RemoveFeedFetch(framework::ProgramDesc *program_desc);

+template <typename T>
+std::shared_ptr<T> MakeFunction(const std::shared_ptr<FunctionInfo> &info,
+                                const Name2VariableMap &params_dict,
+                                const phi::Place &place) {
+  return std::make_shared<T>(info, params_dict, place);
+}
+
+}  // namespace utils
 }  // namespace jit
 }  // namespace paddle
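The MakeFunction template added above lets callers pick the concrete function type at the call site, replacing the removed AddExecutorFunction/AddPEFunction pair on CompilationUnit. A hedged sketch of the pattern as the deserializer now uses it; WireUp is a hypothetical helper, not part of the commit:

#include "paddle/fluid/jit/executor_function.h"
#include "paddle/fluid/jit/function_utils.h"
#include "paddle/fluid/jit/layer.h"

namespace paddle {
namespace jit {
// Sketch: one factory template covers both function kinds.
void WireUp(Layer* layer,
            const std::shared_ptr<FunctionInfo>& info,
            const Name2VariableMap& params_dict,
            const phi::Place& place) {
  layer->SetFunction(
      info->FunctionName(),
      utils::MakeFunction<ExecutorFunction>(info, params_dict, place));
  // A parallel-executor-backed function would be built the same way:
  // utils::MakeFunction<PEFunction>(info, params_dict, place);
}
}  // namespace jit
}  // namespace paddle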
--- a/paddle/fluid/jit/layer.cc
+++ b/paddle/fluid/jit/layer.cc
@@ -24,23 +24,23 @@ Layer::Layer(const std::vector<std::shared_ptr<FunctionInfo>>& infos,
              const phi::Place& place)
     : params_dict_(params_dict) {
   VLOG(3) << "infos size: " << infos.size();
-  // Layer manage the life time of all parameter.
-  for (size_t i = 0; i < infos.size(); ++i) {
-    // TODO(dev): choose exector or pe by flag
-    unit_.AddExecutorFunction(
-        infos[i]->GetFunctionName(), infos[i], params_dict_, place);
-  }
 }

-std::shared_ptr<BaseFunction> Layer::GetFunction(
-    const std::string& name) const {
-  return unit_.GetFunction(name);
+std::shared_ptr<BaseFunction> Layer::Function(const std::string& name) const {
+  return unit_.Function(name);
 }

 std::vector<Variable> Layer::forward(const std::vector<Variable>& inputs) {
-  auto func = GetFunction("forward");
+  auto func = Function("forward");
   return (*func)(inputs);
 }

+void Layer::to(const phi::Place& place) {}
+
+void Layer::SetFunction(const std::string& name,
+                        const std::shared_ptr<BaseFunction>& function) {
+  unit_.SetFunction(name, function);
+}
+
 }  // namespace jit
 }  // namespace paddle
--- a/paddle/fluid/jit/layer.h
+++ b/paddle/fluid/jit/layer.h
@@ -13,7 +13,7 @@
 // limitations under the License.

 #pragma once
+#include <memory>
 #include <string>
 #include <unordered_map>
 #include <vector>
@@ -21,6 +21,7 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/phi/common/place.h"

+#include "paddle/fluid/jit/base_function.h"
 #include "paddle/fluid/jit/compilation_unit.h"
 #include "paddle/fluid/jit/function_schema.h"
@@ -38,14 +39,17 @@ class Layer {
         const Name2VariableMap& params_dict,
         const phi::Place& place);

-  std::shared_ptr<BaseFunction> GetFunction(const std::string& name) const;
+  std::shared_ptr<BaseFunction> Function(const std::string& name) const;

-  Variable GetAttribute(const std::string& name) const;
+  Variable Attribute(const std::string& name) const;

   std::vector<Variable> forward(const std::vector<Variable>& inputs);

   void to(const phi::Place& place);

+  void SetFunction(const std::string& name,
+                   const std::shared_ptr<BaseFunction>& function);
+
  private:
   // internal::Object obj_;
   Name2VariableMap params_dict_;
--- a/paddle/fluid/jit/layer_test.cc
+++ b/paddle/fluid/jit/layer_test.cc
@@ -18,7 +18,6 @@
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/imperative/tracer.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/core/tensor_utils.h"
@@ -51,29 +50,26 @@ PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT);
 namespace paddle {
 namespace jit {
+using DenseTensor = phi::DenseTensor;

-std::vector<Variable> PrepareInputs() {
-  auto default_place = imperative::GetCurrentTracer()->ExpectedPlace();
+std::vector<Variable> PrepareInputs(const phi::Place& place) {
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  auto& dev_ctx = *pool.Get(default_place);
+  auto& dev_ctx = *pool.Get(place);

   Variable v;
   auto* dense_tensor = v.GetMutable<DenseTensor>();
   dense_tensor->Resize(phi::make_ddim({2, 4}));
-  dense_tensor->mutable_data<float>(default_place);
+  dense_tensor->mutable_data<float>(place);
   phi::funcs::set_constant(dev_ctx, dense_tensor, 2.);

   return {v};
 }

 TEST(CpuLayerTest, Construct) {
-  auto tracer = std::make_shared<paddle::imperative::Tracer>();
-  paddle::imperative::SetCurrentTracer(tracer);
-  imperative::GetCurrentTracer()->SetExpectedPlace(phi::CPUPlace());
-
-  std::string path = "./Testing/";
-  auto layer = jit::Load(path);
-  auto inputs = PrepareInputs();
+  auto place = phi::CPUPlace();
+  std::string path = "./multi_program_load/export";
+  auto layer = jit::Load(path, place);
+  auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
   auto out_vars = outs[0];
@@ -81,7 +77,7 @@ TEST(CpuLayerTest, Construct) {
   auto out_data = out_dense_tensor.data<float>();
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);

-  auto func = layer.GetFunction("infer");
+  auto func = layer.Function("infer");
   outs = (*func)(inputs);
   out_vars = outs[0];
   out_dense_tensor = out_vars.Get<DenseTensor>();
@@ -91,18 +87,15 @@ TEST(CpuLayerTest, Construct) {

 #if defined(PADDLE_WITH_CUDA)
 TEST(GpuLayerTest, Construct) {
-  auto tracer = std::make_shared<paddle::imperative::Tracer>();
-  paddle::imperative::SetCurrentTracer(tracer);
-  imperative::GetCurrentTracer()->SetExpectedPlace(phi::GPUPlace(0));
-
+  auto place = phi::GPUPlace();
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-  auto& dev_ctx = *pool.Get(imperative::GetCurrentTracer()->ExpectedPlace());
+  auto& dev_ctx = *pool.Get(place);
   const auto* dev_ctx_gpu = static_cast<const phi::GPUContext*>(&dev_ctx);
   DenseTensor cpu_dense_tensor;

-  std::string path = "./Testing/";
-  auto layer = jit::Load(path);
-  auto inputs = PrepareInputs();
+  std::string path = "./multi_program_load/export";
+  auto layer = jit::Load(path, place);
+  auto inputs = PrepareInputs(place);

   auto outs = layer.forward(inputs);
   auto out_vars = outs[0];
@@ -112,7 +105,7 @@ TEST(GpuLayerTest, Construct) {
   auto out_data = cpu_dense_tensor.data<float>();
   EXPECT_NEAR(out_data[0], 0.02194316, 1e-6);

-  auto func = layer.GetFunction("infer");
+  auto func = layer.Function("infer");
   outs = (*func)(inputs);
   out_vars = outs[0];
   out_dense_tensor = out_vars.Get<DenseTensor>();
--- a/paddle/fluid/jit/pe_function.h
+++ b/paddle/fluid/jit/pe_function.h
@@ -26,7 +26,7 @@
 #include "paddle/fluid/jit/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
-#include "paddle/fluid/jit/layer_utils.h"
+#include "paddle/fluid/jit/function_utils.h"

 namespace paddle {
 namespace jit {
@@ -37,7 +37,7 @@ class PEFunction : public BaseFunction {
              const Name2VariableMap &params_dict,
              const phi::Place &place)
       : info_(info), place_(place) {
-    ShareParamsIntoScope(info_->GetParamNames(), params_dict, &scope_);
+    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
     VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
   }
@@ -47,7 +47,7 @@ class PEFunction : public BaseFunction {
     // bool is_test = true;
     std::string prog_string;
     std::hash<std::string> string_hash;
-    auto &program_desc = info_->GetProgramDesc();
+    auto &program_desc = info_->ProgramDesc();
     const_cast<framework::ProgramDesc *>(&program_desc)
         ->Proto()
         ->SerializePartialToString(&prog_string);
@@ -57,12 +57,11 @@ class PEFunction : public BaseFunction {
     int64_t start_op_index = 0;
     int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

-    ShareInputsIntoScope(info_->GetInputArgNames(), inputs, &scope_);
-    std::vector<std::string> input_var_names = info_->GetInputArgNames();
-    std::vector<std::string> output_var_names = info_->GetOutputArgNames();
+    utils::ShareInputsIntoScope(info_->InputArgNames(), inputs, &scope_);
+    std::vector<std::string> input_var_names = info_->InputArgNames();
+    std::vector<std::string> output_var_names = info_->OutputArgNames();
     std::vector<std::string> dout_var_names;
     if (end_op_index > start_op_index) {
-      // TODO(dev): support other devices
       auto cache_info = framework::GetExecutorInfoFromCache(program_desc,
                                                             place_,
                                                             start_op_index,
@@ -92,7 +91,7 @@ class PEFunction : public BaseFunction {
     }
     VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
     std::vector<Variable> res;
-    FetchVarsByNames(info_->GetOutputArgNames(), scope_, &res);
+    utils::FetchVarsByNames(info_->OutputArgNames(), scope_, &res);
     return res;
   }
--- a/paddle/fluid/jit/serializer.cc
+++ b/paddle/fluid/jit/serializer.cc
@@ -14,24 +14,32 @@
 #include "paddle/fluid/jit/serializer.h"

+#include <set>
+
+#include "paddle/fluid/platform/device_context.h"
+
+#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/serializer_utils.h"
+
 namespace paddle {
 namespace jit {

-Layer Deserializer::operator()(const std::string& dir_path) {
-  const auto& file_name_prefixs = GetPdmodelFileNamePrefix(dir_path);
+Layer Deserializer::operator()(const std::string& path,
+                               const phi::Place& place) {
+  const auto& pdmodel_paths = utils::PdmodelFilePaths(path);
   // set is ordered
   std::set<std::string> param_names_set;
   std::vector<std::shared_ptr<FunctionInfo>> infos;
   Name2VariableMap params_dict;
-  for (auto& it : file_name_prefixs) {
+  for (auto& it : pdmodel_paths) {
     auto& func_name = it.first;
-    auto program_desc = LoadProgram(dir_path + it.second + PDMODEL_SUFFIX);
+    auto program_desc = LoadProgram(it.second);
     // TODO(dev): load int/float attrs
     std::vector<std::string> persist_var_names;
     auto all_var_desc = program_desc.Block(0).AllVars();
     for (auto* desc_ptr : all_var_desc) {
-      if (IsPersistable(desc_ptr)) {
+      if (utils::IsPersistable(desc_ptr)) {
         persist_var_names.emplace_back(desc_ptr->Name());
       }
     }
@@ -41,52 +49,18 @@ Layer Deserializer::operator()(const std::string& dir_path) {
         func_name, persist_var_names, program_desc));
   }

-  auto default_place = imperative::GetCurrentTracer()->ExpectedPlace();
-  // Read from one pdiparams file, refine here
-  ReadTensorData(dir_path + "export.forward.pdiparams",
-                 param_names_set,
-                 default_place,
-                 &params_dict);
-
-  return Layer(infos, params_dict, default_place);
-}
-
-bool Deserializer::IsPersistable(framework::VarDesc* desc_ptr) {
-  auto type = desc_ptr->GetType();
-  if (type == framework::proto::VarType::FEED_MINIBATCH ||
-      type == framework::proto::VarType::FETCH_LIST ||
-      type == framework::proto::VarType::READER ||
-      type == framework::proto::VarType::RAW) {
-    return false;
-  }
-  return desc_ptr->Persistable();
-}
-
-bool Deserializer::EndsWith(const std::string& str, const std::string& suffix) {
-  if (str.length() < suffix.length()) {
-    return false;
-  }
-  return str.compare(str.length() - suffix.length(), suffix.length(), suffix) ==
-         0;
-}
-
-// process filename like `export.forward.pdmodel` and `export.infer.pdmodel`
-const std::vector<std::pair<std::string, std::string>>
-Deserializer::GetPdmodelFileNamePrefix(const std::string& path) {
-  std::vector<std::pair<std::string, std::string>> file_name_prefixs;
-  DIR* dir = opendir(path.c_str());
-  struct dirent* ptr;
-  while ((ptr = readdir(dir)) != nullptr) {
-    std::string file_name = ptr->d_name;
-    if (EndsWith(file_name, PDMODEL_SUFFIX)) {
-      std::string prefix = file_name.substr(
-          0, file_name.length() - std::string(PDMODEL_SUFFIX).length());
-      std::string func_name = prefix.substr(prefix.find_first_of(".") + 1);
-      file_name_prefixs.emplace_back(std::make_pair(func_name, prefix));
-    }
-  }
-  closedir(dir);
-  return file_name_prefixs;
+  ReadTensorData(path + PDPARAMS_SUFFIX, param_names_set, place, &params_dict);
+  // ReadAttributeData();
+
+  Layer layer = Layer(infos, params_dict, place);
+
+  for (auto& info : infos) {
+    layer.SetFunction(
+        info->FunctionName(),
+        utils::MakeFunction<ExecutorFunction>(info, params_dict, place));
+  }
+
+  return layer;
 }

 void Deserializer::ReadTensorData(const std::string& file_name,
@@ -107,6 +81,9 @@ void Deserializer::ReadTensorData(const std::string& file_name,
   }
 }

+void Deserializer::ReadAttributeData(const std::string& file_path,
+                                     Name2VariableMap* attrs_dict) const {}
+
 framework::ProgramDesc Deserializer::LoadProgram(const std::string& file_name) {
   VLOG(3) << "LoadProgram " << file_name;
   std::ifstream fin(file_name, std::ios::in | std::ios::binary);
@@ -118,9 +95,9 @@ framework::ProgramDesc Deserializer::LoadProgram(const std::string& file_name) {
   return framework::ProgramDesc(buffer);
 }

-Layer Load(const std::string& file_path) {
+Layer Load(const std::string& file_path, const phi::Place& place) {
   auto deserializer = Deserializer();
-  return deserializer(file_path);
+  return deserializer(file_path, place);
 }

 }  // namespace jit
--- a/paddle/fluid/jit/serializer.h
+++ b/paddle/fluid/jit/serializer.h
@@ -14,24 +14,15 @@
 #pragma once

-#include <dirent.h>
-#include <fstream>
-#include <set>
 #include <string>

-#include "paddle/fluid/framework/var_desc.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/imperative/tracer.h"
-#include "paddle/fluid/platform/device_context.h"
-#include "paddle/phi/core/dense_tensor.h"

-#include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/layer.h"

 namespace paddle {
 namespace jit {
-static const char PDMODEL_SUFFIX[] = ".pdmodel";
-static const char PDPARAMS_SUFFIX[] = ".pdiparams";

 // Export Layer into local disk
 class Serializer {
  public:
@@ -48,21 +39,17 @@ class Serializer {
 class Deserializer {
  public:
-  Layer operator()(const std::string& dir_path);
+  Layer operator()(const std::string& dir_path, const phi::Place& place);

  private:
-  bool IsPersistable(framework::VarDesc* desc_ptr);
-
-  bool EndsWith(const std::string& str, const std::string& suffix);
-
-  const std::vector<std::pair<std::string, std::string>>
-  GetPdmodelFileNamePrefix(const std::string& path);
-
   void ReadTensorData(const std::string& file_name,
                       const std::set<std::string>& var_name,
                       const phi::Place& place,
                       Name2VariableMap* params_dict) const;

+  void ReadAttributeData(const std::string& file_path,
+                         Name2VariableMap* attrs_dict) const;
+
   // void ReadExtraInfo(const std::string& file_name) const;
   // void ReadByteCode(const std::string& file_name) const;
@@ -71,7 +58,8 @@ class Deserializer {

 void Export(const Layer& layer, const std::string& file_path);

-Layer Load(const std::string& file_path);
+// path should be like 'dirname/file_prefix'
+Layer Load(const std::string& path, const phi::Place& place);

 }  // namespace jit
 }  // namespace paddle
new file: paddle/fluid/jit/serializer_utils.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/serializer_utils.h"
#include <dirent.h>
#include <fstream>
#include "paddle/fluid/framework/var_desc.h"
namespace paddle {
namespace jit {
namespace utils {
bool IsPersistable(framework::VarDesc* desc_ptr) {
auto type = desc_ptr->GetType();
if (type == framework::proto::VarType::FEED_MINIBATCH ||
type == framework::proto::VarType::FETCH_LIST ||
type == framework::proto::VarType::READER ||
type == framework::proto::VarType::RAW) {
return false;
}
return desc_ptr->Persistable();
}
bool StartsWith(const std::string& str, const std::string& prefix) {
return str.compare(0, prefix.length(), prefix) == 0;
}
bool EndsWith(const std::string& str, const std::string& suffix) {
if (str.length() < suffix.length()) {
return false;
}
return str.compare(str.length() - suffix.length(), suffix.length(), suffix) ==
0;
}
void ReplaceAll(std::string* str,
const std::string& old_value,
const std::string& new_value) {
std::string::size_type pos = 0;
while ((pos = str->find(old_value, pos)) != std::string::npos) {
*str = str->replace(pos, old_value.length(), new_value);
if (new_value.length() > 0) {
pos += new_value.length();
}
}
}
bool FileExists(const std::string& file_path) {
std::ifstream file(file_path.c_str());
return file.good();
}
const std::vector<std::pair<std::string, std::string>> PdmodelFilePaths(
const std::string& path) {
std::vector<std::pair<std::string, std::string>> pdmodel_paths;
std::string format_path = path;
ReplaceAll(&format_path, R"(\\)", "/");
ReplaceAll(&format_path, R"(\)", "/");
std::string layer_prefix =
format_path.substr(format_path.find_last_of("/") + 1);
std::string dir_path =
format_path.substr(0, format_path.length() - layer_prefix.length());
DIR* dir = opendir(dir_path.c_str());
struct dirent* ptr;
while ((ptr = readdir(dir)) != nullptr) {
std::string file_name = ptr->d_name;
if (StartsWith(file_name, layer_prefix) &&
EndsWith(file_name, PDMODEL_SUFFIX)) {
std::string prefix = file_name.substr(
0, file_name.length() - std::string(PDMODEL_SUFFIX).length());
std::string func_name = prefix.substr(prefix.find_first_of(".") + 1);
VLOG(3) << "func_name: " << func_name << ", path: " << dir_path + file_name;
if (func_name == layer_prefix) {
pdmodel_paths.emplace_back(
std::make_pair("forward", dir_path + file_name));
} else {
pdmodel_paths.emplace_back(
std::make_pair(func_name, dir_path + file_name));
}
}
}
closedir(dir);
return pdmodel_paths;
}
} // namespace utils
} // namespace jit
} // namespace paddle
new file: paddle/fluid/jit/serializer_utils.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/var_desc.h"
namespace paddle {
namespace jit {
static const char PDMODEL_SUFFIX[] = ".pdmodel";
static const char PDPARAMS_SUFFIX[] = ".pdiparams";
namespace utils {
bool IsPersistable(framework::VarDesc* desc_ptr);
bool StartsWith(const std::string& str, const std::string& prefix);
bool EndsWith(const std::string& str, const std::string& suffix);
void ReplaceAll(std::string* str,
const std::string& old_value,
const std::string& new_value);
bool FileExists(const std::string& file_path);
const std::vector<std::pair<std::string, std::string>> PdmodelFilePaths(
const std::string& path);
} // namespace utils
} // namespace jit
} // namespace paddle
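To make the Windows path handling concrete: PdmodelFilePaths first normalizes backslash separators via ReplaceAll, splits the prefix path into a directory and a layer prefix, then pairs every matching .pdmodel file with the function name embedded in its file name. An illustrative sketch (the directory layout is assumed, matching the test data):

#include <iostream>

#include "paddle/fluid/jit/serializer_utils.h"

int main() {
  // Assuming ./multi_program_load contains export.forward.pdmodel and
  // export.infer.pdmodel; a backslash-separated path such as
  // "multi_program_load\export" would be normalized and work as well.
  const auto pairs =
      paddle::jit::utils::PdmodelFilePaths("./multi_program_load/export");
  for (const auto& p : pairs) {
    // Prints e.g. "forward -> ./multi_program_load/export.forward.pdmodel"
    std::cout << p.first << " -> " << p.second << "\n";
  }
  return 0;
}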