From ec6da792bd8c82b3a8d40dae8c28f0134f83d0c9 Mon Sep 17 00:00:00 2001 From: WangZhen <23097963+0x45f@users.noreply.github.com> Date: Fri, 17 Jun 2022 09:45:52 +0800 Subject: [PATCH] Support multi-program save/load (#41783) * Load models and run PE * Stash all code * Fix C++ UT and refine code * Fix string length error * skip windows ci because windows do not have wget * Just print out in layer_test because bce param file changed * Refine code * Fix pdiparam download error for layer_test * Polish FunctionSchema and rename some arg * New Exector only once * Add value type for ivalue * Read params from one pdiparam file * Replace ivalue using Variable * Remove ivalue file * Fix inner_exe new * Using DeserializeFromStream to load tensor * Split .cc files * Polish cmakelist --- paddle/fluid/CMakeLists.txt | 1 + paddle/fluid/jit/CMakeLists.txt | 38 ++++++++ paddle/fluid/jit/ast.h | 59 +++++++++++++ paddle/fluid/jit/base_function.cc | 124 ++++++++++++++++++++++++++ paddle/fluid/jit/base_function.h | 96 ++++++++++++++++++++ paddle/fluid/jit/compilation_unit.h | 37 ++++++++ paddle/fluid/jit/exector_function.h | 51 +++++++++++ paddle/fluid/jit/layer.cc | 48 ++++++++++ paddle/fluid/jit/layer.h | 59 +++++++++++++ paddle/fluid/jit/layer_test.cc | 87 ++++++++++++++++++ paddle/fluid/jit/object.h | 66 ++++++++++++++ paddle/fluid/jit/pe_function.h | 81 +++++++++++++++++ paddle/fluid/jit/serializer.cc | 131 ++++++++++++++++++++++++++++ paddle/fluid/jit/serializer.h | 75 ++++++++++++++++ 14 files changed, 953 insertions(+) create mode 100644 paddle/fluid/jit/CMakeLists.txt create mode 100644 paddle/fluid/jit/ast.h create mode 100644 paddle/fluid/jit/base_function.cc create mode 100644 paddle/fluid/jit/base_function.h create mode 100644 paddle/fluid/jit/compilation_unit.h create mode 100644 paddle/fluid/jit/exector_function.h create mode 100644 paddle/fluid/jit/layer.cc create mode 100644 paddle/fluid/jit/layer.h create mode 100644 paddle/fluid/jit/layer_test.cc create mode 100644 
paddle/fluid/jit/object.h
 create mode 100644 paddle/fluid/jit/pe_function.h
 create mode 100644 paddle/fluid/jit/serializer.cc
 create mode 100644 paddle/fluid/jit/serializer.h

diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index 75966399148..47e53e64f59 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -6,5 +6,6 @@ add_subdirectory(imperative)
add_subdirectory(operators)
add_subdirectory(pybind)
add_subdirectory(eager)
add_subdirectory(jit)
# NOTE: please add subdirectory inference at last.
add_subdirectory(inference)

diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt
new file mode 100644
index 00000000000..b44060c0fad
--- /dev/null
+++ b/paddle/fluid/jit/CMakeLists.txt
@@ -0,0 +1,38 @@
cc_library(
  jit_serializer
  SRCS serializer.cc
  DEPS lod_tensor device_context)

cc_library(
  jit_layer
  SRCS layer.cc
  DEPS executor parallel_executor executor_cache)

cc_library(
  jit_base_function
  SRCS base_function.cc
  DEPS scope proto_desc)

if(WITH_TESTING AND NOT WIN32)
  # Test program is fetched from CI storage; skipped on Windows (no wget).
  add_custom_target(
    jit_download_program
    COMMAND wget -nc https://paddle-ci.gz.bcebos.com/dy2st/Testing.tar.gz
    COMMAND tar zxvf Testing.tar.gz)
  set(JIT_DEPS
      phi
      elementwise_add_op
      matmul_v2_op
      activation_op
      reduce_mean_op
      feed_op
      fetch_op
      scale_op
      jit_serializer
      jit_layer
      jit_base_function)
  cc_test(
    layer_test
    SRCS layer_test.cc
    DEPS ${JIT_DEPS})
  add_dependencies(layer_test jit_download_program)
endif()

diff --git a/paddle/fluid/jit/ast.h b/paddle/fluid/jit/ast.h
new file mode 100644
index 00000000000..535b3a89dd6
--- /dev/null
+++ b/paddle/fluid/jit/ast.h
@@ -0,0 +1,59 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// NOTE(review): angle-bracket template arguments and stdlib include names were
// stripped in transit; restored here — confirm against upstream commit.
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/variable.h"

namespace paddle {
namespace jit {
using Variable = paddle::framework::Variable;
class BaseFunction;
class CompilationUnit;

// Static description of a scripted class: its constant attribute names/values
// and the functions compiled into the owning CompilationUnit.
class ClassType {
 public:
  ClassType(const std::vector<std::string>& names,
            std::weak_ptr<CompilationUnit> cu)
      : const_names_(names), compilation_unit_(cu) {}

  static std::shared_ptr<ClassType> Create(
      const std::vector<std::string>& names,
      std::weak_ptr<CompilationUnit> cu) {
    return std::make_shared<ClassType>(names, cu);
  }

  // const std::vector<BaseFunction*> Methods() const;

  // const Variable& GetAttribute(size_t slot) const;
  // const Variable& GetAttribute(const std::string& name) const;

  // size_t AddAttribute(const std::string& name, Variable val);

 private:
  // TODO(dev): distinguish parameter and buffer
  std::vector<std::string> const_names_;
  std::vector<Variable> const_value_;

  std::vector<BaseFunction*> methods_;
  std::vector<BaseFunction*> static_method_;
  // weak_ptr breaks the ownership cycle with the CompilationUnit that owns us.
  std::weak_ptr<CompilationUnit> compilation_unit_;
};

}  // namespace jit
}  // namespace paddle

diff --git a/paddle/fluid/jit/base_function.cc b/paddle/fluid/jit/base_function.cc
new file mode 100644
index 00000000000..fcbe64de8d7
--- /dev/null
+++ b/paddle/fluid/jit/base_function.cc
@@ -0,0 +1,124 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/jit/base_function.h" + +namespace paddle { +namespace jit { + +Argument::Argument(const std::string &name, bool is_out) + : name_(name), is_output_(is_out) {} + +const std::string &Argument::Name() const { return name_; } + +std::vector FunctionSchema::GetInputArgNames() { + std::vector input_arg_names; + for (auto &arg : input_args) { + input_arg_names.emplace_back(arg.Name()); + } + return input_arg_names; +} + +std::vector FunctionSchema::GetOutputArgNames() { + std::vector output_arg_names; + for (auto &arg : output_args) { + output_arg_names.emplace_back(arg.Name()); + } + return output_arg_names; +} + +void FunctionSchema::AddInputArg(std::string name, bool is_output) { + input_args.emplace_back(name, is_output); +} + +void FunctionSchema::AddOutputArg(std::string name, bool is_output) { + output_args.emplace_back(name, is_output); +} + +BaseFunction::BaseFunction( + const framework::ProgramDesc &program_desc, + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict) + : program_desc_(program_desc) { + // Parse FunctionSchema + // skip_var_name_ = program_desc_.GetFetchTargetNames(); + for (auto &in_name : program_desc_.GetFeedTargetNames()) { + schema_.AddInputArg(in_name, false); + } + for (auto &out_name : program_desc_.GetFetchTargetNames()) { + schema_.AddOutputArg(out_name, true); + } + // share params into scope + SharePartialIntoScope(param_names_for_program, params_dict); + VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); + // remove feed fetch op + RemoveFeedFetch(); +} + +void 
BaseFunction::FetchOutput(std::vector *outs) { + for (auto &out_name : schema_.GetOutputArgNames()) { + VLOG(3) << "fetch out: " << out_name; + auto *var = scope_.FindVar(out_name); + auto &src_tensor = var->Get(); + Variable v; + auto *p = v.GetMutable(); + *p = src_tensor; + outs->emplace_back(v); + } +} + +void BaseFunction::ShareIntoScope(const VariableNameMap &ivals) { + VLOG(3) << "ivals size: " << ivals.size(); + for (auto it = ivals.begin(); it != ivals.end(); ++it) { + VLOG(3) << "share into scope: " << it->first; + DenseTensor dense_tensor = it->second.Get(); + auto *var = scope_.Var(it->first); + auto *dst_tensor = var->GetMutable(); + *dst_tensor = dense_tensor; + } +} + +void BaseFunction::SharePartialIntoScope( + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict) { + VLOG(3) << "ivals size: " << param_names_for_program.size(); + for (size_t i = 0; i < param_names_for_program.size(); ++i) { + std::string name = param_names_for_program[i]; + Variable val = params_dict.find(name)->second; + auto &dense_tensor = val.Get(); + VLOG(3) << "share into scope: " << name; + auto *var = scope_.Var(name); + auto *dst_tensor = var->GetMutable(); + *dst_tensor = dense_tensor; + } +} + +void BaseFunction::RemoveFeedFetch() { + for (size_t i = 0; i < program_desc_.Size(); ++i) { + auto *block = program_desc_.MutableBlock(i); + const auto &all_ops = block->AllOps(); + size_t op_size = all_ops.size(); + VLOG(3) << "op_size: " << op_size; + for (int i = op_size - 1; i >= 0; i--) { + auto op = all_ops[i]; + if (op->Type() == "feed" || op->Type() == "fetch") { + VLOG(3) << "remove op type: " << op->Type() << ", index: " << i; + block->RemoveOp(i, i + 1); + } + } + } +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/base_function.h b/paddle/fluid/jit/base_function.h new file mode 100644 index 00000000000..3d4f9a29eb6 --- /dev/null +++ b/paddle/fluid/jit/base_function.h @@ -0,0 +1,96 @@ +// Copyright (c) 2022 PaddlePaddle 
Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/phi/core/dense_tensor.h" +#include "paddle/utils/none.h" +#include "paddle/utils/optional.h" + +namespace paddle { +namespace jit { + +using Variable = paddle::framework::Variable; +using VariableNameMap = std::map; +using DenseTensor = phi::DenseTensor; + +class Argument { + public: + explicit Argument(const std::string &name, bool is_out = false); + + const std::string &Name() const; + + private: + std::string name_; + // paddle::optional default_val_; + bool is_output_; +}; + +class FunctionSchema { + public: + FunctionSchema() = default; + + std::vector GetInputArgNames(); + + std::vector GetOutputArgNames(); + + void AddInputArg(std::string name, bool is_output); + + void AddOutputArg(std::string name, bool is_output); + + private: + std::vector input_args; + std::vector output_args; +}; + +// TODO(dev): make it as abstract class +class BaseFunction { + public: + BaseFunction(const framework::ProgramDesc &program_desc, + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict); + + virtual ~BaseFunction() {} + + virtual std::vector operator()(const VariableNameMap &inputs) = 0; + + protected: + void FetchOutput(std::vector *outs); + + void ShareIntoScope(const VariableNameMap &ivals); + + void 
SharePartialIntoScope( + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict); + + void RemoveFeedFetch(); + + protected: + framework::ProgramDesc program_desc_; + // TODO(dev): need a better way to share params + // std::vector ¶m_for_program_; + // std::vector skip_var_name_; + FunctionSchema schema_; + // global_scope place params + framework::Scope scope_; + // framework::Executor inner_exe_; +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/compilation_unit.h b/paddle/fluid/jit/compilation_unit.h new file mode 100644 index 00000000000..815e9d3f4c0 --- /dev/null +++ b/paddle/fluid/jit/compilation_unit.h @@ -0,0 +1,37 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +namespace paddle { +namespace jit { +class BaseFunction; + +class CompilationUnit { + public: + CompilationUnit() = default; + ~CompilationUnit() {} + + private: + std::vector> functions_; + std::unordered_map functions_idx_; +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/exector_function.h b/paddle/fluid/jit/exector_function.h new file mode 100644 index 00000000000..3217c62fbd7 --- /dev/null +++ b/paddle/fluid/jit/exector_function.h @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/jit/base_function.h" + +namespace paddle { +namespace jit { + +class ExectorFunction : public BaseFunction { + public: + ExectorFunction(const framework::ProgramDesc &program_desc, + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict) + : BaseFunction(program_desc, param_names_for_program, params_dict), + inner_exe_(phi::CPUPlace()) {} + + ~ExectorFunction() {} + + std::vector operator()(const VariableNameMap &inputs) { + // share input into scope + ShareIntoScope(inputs); + // run program + inner_exe_.Run(program_desc_, &scope_, /*blockID=*/0, false, true, + schema_.GetOutputArgNames()); + VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); + // fetch outputs + std::vector res; + FetchOutput(&res); + return res; + } + + private: + // TODO(dev): support other devices exe + framework::Executor inner_exe_; +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc new file mode 100644 index 00000000000..cb13a003aff --- /dev/null +++ b/paddle/fluid/jit/layer.cc @@ -0,0 +1,48 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/jit/layer.h" + +namespace paddle { +namespace jit { +// TODO(dev): Make vector, num_slot as in argument +// Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) +// {} +Layer::Layer( + const std::vector& func_names, + const std::vector& program_descs, + const std::vector>& param_names_for_each_program, + const VariableNameMap& params_dict) { + VLOG(3) << "program size: " << program_descs.size(); + // Layer manage the life time of all parameter. + for (size_t i = 0; i < func_names.size(); ++i) { + // TODO(dev): choose exector or pe by flag + function_dict[func_names[i]] = std::make_shared( + program_descs[i], param_names_for_each_program[i], params_dict); + } +} + +// TODO(dev): make it as const function +std::shared_ptr Layer::GetFunction(const std::string& name) { + VLOG(3) << "funcs_ size: " << function_dict.size(); + return function_dict[name]; +} + +std::vector Layer::forward(const VariableNameMap& inputs) { + auto func = GetFunction("forward"); + return (*func)(inputs); +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h new file mode 100644 index 00000000000..0c2ad49c771 --- /dev/null +++ b/paddle/fluid/jit/layer.h @@ -0,0 +1,59 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include + +#include "paddle/fluid/jit/ast.h" +#include "paddle/fluid/jit/base_function.h" +#include "paddle/fluid/jit/compilation_unit.h" +#include "paddle/fluid/jit/exector_function.h" +#include "paddle/fluid/jit/object.h" +#include "paddle/fluid/jit/pe_function.h" + +namespace paddle { +namespace jit { +using Variable = paddle::framework::Variable; +using VariableNameMap = std::map; +using DenseTensor = phi::DenseTensor; + +class Layer { + public: + // TODO(dev): Make vector, num_slot as in argument + // Layer(const std::shared_ptr& type) : obj_(type, /*num_slot*/ 0U) + // {} + Layer( + const std::vector& func_names, + const std::vector& program_descs, + const std::vector>& param_names_for_each_program, + const VariableNameMap& params_dict); + + // TODO(dev): make it as const function + std::shared_ptr GetFunction(const std::string& name); + + std::vector forward(const VariableNameMap& inputs); + + private: + // internal::Object obj_; + // std::vector all_program_desc_; + // std::vector> param_name_for_each_program_; + // std::vector all_param_; + std::map> function_dict; +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/layer_test.cc b/paddle/fluid/jit/layer_test.cc new file mode 100644 index 00000000000..9386569d48d --- /dev/null +++ b/paddle/fluid/jit/layer_test.cc @@ -0,0 +1,87 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/jit/layer.h" + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/jit/serializer.h" +#include "paddle/fluid/memory/allocation/allocator_facade.h" +#include "paddle/phi/api/include/tensor.h" +#include "paddle/phi/core/dense_tensor.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/funcs/math_function.h" + +USE_OP_ITSELF(elementwise_add); +USE_OP_ITSELF(matmul_v2); +USE_OP_ITSELF(relu); +USE_OP_ITSELF(reduce_mean); +USE_OP_ITSELF(feed); +USE_OP_ITSELF(fetch); +USE_OP_ITSELF(scale); + +PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(matmul, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(relu, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(mean, CPU, ALL_LAYOUT); +PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT); + +namespace paddle { +namespace jit { + +VariableNameMap PrepareInputs() { + auto temp = DenseTensor(); + temp.Resize(phi::make_ddim({2, 4})); + phi::CPUContext cpu_ctx; + cpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance() + .GetAllocator(paddle::platform::CPUPlace()) + .get()); + cpu_ctx.Init(); + cpu_ctx.Alloc(&temp); + phi::funcs::set_constant(cpu_ctx, &temp, 2.); + Variable v; + auto *p = v.GetMutable(); + *p = temp; + // TODO(dev): associate the input name + return {{"x", v}}; +} + 
+TEST(layer, Construct) { + std::string path = "./Testing/"; + auto layer = jit::Load(path); + auto inputs = PrepareInputs(); + + auto outs = layer.forward(inputs); + auto out_vars = outs[0]; + auto out_dense_tensor = out_vars.Get(); + auto out_data = out_dense_tensor.data(); + EXPECT_NEAR(out_data[0], 0.02194316, 1e-6); + + auto func = layer.GetFunction("infer"); + outs = (*func)(inputs); + out_vars = outs[0]; + out_dense_tensor = out_vars.Get(); + out_data = out_dense_tensor.data(); + EXPECT_NEAR(out_data[0], 1.41562390, 1e-6); +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/object.h b/paddle/fluid/jit/object.h new file mode 100644 index 00000000000..94aae673760 --- /dev/null +++ b/paddle/fluid/jit/object.h @@ -0,0 +1,66 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include + +#include "paddle/fluid/framework/variable.h" + +namespace paddle { +namespace jit { +class ClassType; + +namespace internal { + +class Object { + public: + Object(const std::shared_ptr& type, size_t num_slot) + : type_(type) { + slots_.resize(num_slot); + } + + static std::unique_ptr Create(std::shared_ptr type, + size_t num_slot) { + return std::make_unique(type, num_slot); + } + + std::shared_ptr Type() const { return type_; } + + void SetSlot(size_t slot, Variable val) { + if (slot >= slots_.size()) { + slots_.resize(slot); + } + slots_[slot] = std::move(val); + } + + const Variable& GetSlot(size_t slot) { + // TODO(dev): Add ENFORCE_LT(slot, size()); + return slots_[slot]; + } + + Variable GetAttr(const std::string& name) const; + + void SetAttr(const std::string& name, Variable val); + + private: + std::shared_ptr type_; + // Store Tensors and Attributes + std::vector slots_; +}; + +} // namespace internal +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h new file mode 100644 index 00000000000..a3d7eb33f71 --- /dev/null +++ b/paddle/fluid/jit/pe_function.h @@ -0,0 +1,81 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include + +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/executor_cache.h" +#include "paddle/fluid/jit/base_function.h" + +namespace paddle { +namespace jit { + +class PEFunction : public BaseFunction { + public: + PEFunction(const framework::ProgramDesc &program_desc, + const std::vector param_names_for_program, + const VariableNameMap ¶ms_dict) + : BaseFunction(program_desc, param_names_for_program, params_dict) {} + + ~PEFunction() {} + + std::vector operator()(const VariableNameMap &inputs) { + // bool is_test = true; + std::string prog_string; + std::hash string_hash; + program_desc_.Proto()->SerializePartialToString(&prog_string); + int64_t program_id = static_cast(string_hash(prog_string)); + const framework::BlockDesc &global_block = program_desc_.Block(0); + int64_t start_op_index = 0; + int64_t end_op_index = static_cast(global_block.OpSize()); + + ShareIntoScope(inputs); + std::vector input_var_names = schema_.GetInputArgNames(); + std::vector output_var_names = schema_.GetOutputArgNames(); + std::vector dout_var_names; + if (end_op_index > start_op_index) { + // TODO(dev): support other devices + auto cache_info = framework::GetExecutorInfoFromCache( + program_desc_, phi::CPUPlace(), start_op_index, end_op_index, + /*is_grad=*/false, program_id, &scope_); + auto ¶llel_executor = cache_info.first; + auto &skip_eager_delete_vars = + framework::ExecutorInfoCache::Instance().SkipEagerDeleteVars( + program_id, false); + if (cache_info.second /*is_new_created*/) { + parallel_executor->SkipMemoryReuse(/*scope_idx=*/0, input_var_names); + skip_eager_delete_vars.insert(skip_eager_delete_vars.end(), + output_var_names.begin(), + output_var_names.end()); + skip_eager_delete_vars.insert(skip_eager_delete_vars.end(), + dout_var_names.begin(), + dout_var_names.end()); + framework::details::ParseSafeEagerDeletionSkipVars( + program_desc_, end_op_index, output_var_names, + &skip_eager_delete_vars); + } + 
parallel_executor->RunWithoutFetch(skip_eager_delete_vars); + } + VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_); + std::vector res; + FetchOutput(&res); + return res; + } +}; + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc new file mode 100644 index 00000000000..a8bd934d12e --- /dev/null +++ b/paddle/fluid/jit/serializer.cc @@ -0,0 +1,131 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/jit/serializer.h" + +namespace paddle { +namespace jit { + +Layer Deserializer::operator()(const std::string& dir_path) { + const auto& file_name_prefixs = GetPdmodelFileNamePrefix(dir_path); + std::vector func_names; + std::vector program_descs; + std::vector> param_names_for_each_program; + // set is ordered + std::set param_names_set; + VariableNameMap params_dict; + for (auto& it : file_name_prefixs) { + func_names.emplace_back(it.first); + + auto program = LoadProgram(dir_path + it.second + PDMODEL_SUFFIX); + program_descs.emplace_back(program); + + // TODO(dev): load int/float params + std::vector persistable_var_names; + auto all_var_desc = program.Block(0).AllVars(); + for (auto* desc_ptr : all_var_desc) { + if (IsPersistable(desc_ptr)) { + persistable_var_names.emplace_back(desc_ptr->Name()); + } + } + + param_names_for_each_program.emplace_back(persistable_var_names); + param_names_set.insert(persistable_var_names.begin(), + persistable_var_names.end()); + } + + // Read from one pdiparams file, refine here + auto params_for_all_program = + ReadTensorData(dir_path + "export.forward.pdiparams", param_names_set); + params_dict.insert(params_for_all_program.begin(), + params_for_all_program.end()); + + return Layer(func_names, program_descs, param_names_for_each_program, + params_dict); +} + +bool Deserializer::IsPersistable(framework::VarDesc* desc_ptr) { + auto type = desc_ptr->GetType(); + if (type == framework::proto::VarType::FEED_MINIBATCH || + type == framework::proto::VarType::FETCH_LIST || + type == framework::proto::VarType::READER || + type == framework::proto::VarType::RAW) { + return false; + } + return desc_ptr->Persistable(); +} + +bool Deserializer::EndsWith(const std::string& str, const std::string& suffix) { + if (str.length() < suffix.length()) { + return false; + } + return str.compare(str.length() - suffix.length(), suffix.length(), suffix) == + 0; +} + +const std::vector> 
+Deserializer::GetPdmodelFileNamePrefix(const std::string& path) { + std::vector> file_name_prefixs; + DIR* dir = opendir(path.c_str()); + struct dirent* ptr; + while ((ptr = readdir(dir)) != nullptr) { + std::string file_name = ptr->d_name; + if (EndsWith(file_name, PDMODEL_SUFFIX)) { + std::string prefix = file_name.substr( + 0, file_name.length() - std::string(PDMODEL_SUFFIX).length()); + std::string func_name = prefix.substr(prefix.find_first_of(".") + 1); + file_name_prefixs.emplace_back(std::make_pair(func_name, prefix)); + } + } + closedir(dir); + return file_name_prefixs; +} + +VariableNameMap Deserializer::ReadTensorData( + const std::string& file_name, const std::set& var_name) const { + VLOG(3) << "ReadTensorData from: " << file_name; + std::ifstream fin(file_name, std::ios::binary); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + // TODO(dev): Support other devices + auto& dev_ctx = *pool.Get(phi::CPUPlace()); + VariableNameMap res; + for (auto it = var_name.begin(); it != var_name.end(); it++) { + VLOG(3) << "load Tensor: " << *it; + Variable v; + // TODO(dev): Support framework::Vocab + DenseTensor* dense_tesnor = v.GetMutable(); + framework::DeserializeFromStream(fin, dense_tesnor, dev_ctx); + res[*it] = v; + } + return res; +} + +framework::ProgramDesc Deserializer::LoadProgram(const std::string& file_name) { + VLOG(3) << "LoadProgram " << file_name; + std::ifstream fin(file_name, std::ios::in | std::ios::binary); + fin.seekg(0, std::ios::end); + std::string buffer(fin.tellg(), ' '); + fin.seekg(0, std::ios::beg); + fin.read(&buffer[0], buffer.size()); + fin.close(); + return framework::ProgramDesc(buffer); +} + +Layer Load(const std::string& file_path) { + auto deserializer = Deserializer(); + return deserializer(file_path); +} + +} // namespace jit +} // namespace paddle diff --git a/paddle/fluid/jit/serializer.h b/paddle/fluid/jit/serializer.h new file mode 100644 index 00000000000..4036c5add7b --- /dev/null +++ 
b/paddle/fluid/jit/serializer.h @@ -0,0 +1,75 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/jit/layer.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/phi/core/dense_tensor.h" + +namespace paddle { +namespace jit { +static const char PDMODEL_SUFFIX[] = ".pdmodel"; +static const char PDPARAMS_SUFFIX[] = ".pdiparams"; + +// Export Layer into local disk +class Serializer { + public: + void operator()(const Layer& layer, const std::string& file_dir); + + // private: + // void WriteTensorData(const Layer& layer, const std::string& file_name) + // const; + // void WriteExtraInfo(const Layer& layer, const std::string& file_name) + // const; + // void WriteByteCode(const Layer& layer, const std::string& file_name) + // const; +}; + +class Deserializer { + public: + Layer operator()(const std::string& dir_path); + + private: + bool IsPersistable(framework::VarDesc* desc_ptr); + + bool EndsWith(const std::string& str, const std::string& suffix); + + const std::vector> + GetPdmodelFileNamePrefix(const std::string& path); + + VariableNameMap ReadTensorData(const std::string& file_name, + const std::set& var_name) const; + + // void ReadExtraInfo(const std::string& file_name) const; + // 
void ReadByteCode(const std::string& file_name) const; + + framework::ProgramDesc LoadProgram(const std::string& file_name); +}; + +void Export(const Layer& layer, const std::string& file_path); + +Layer Load(const std::string& file_path); + +} // namespace jit +} // namespace paddle -- GitLab