Unverified commit 22c3cdb4, authored by Leo Chen and committed by GitHub

add interpretercore for jit engine (#46092)

* add interpretercore for jit engine

* add ut
Parent 08186f14
@@ -34,7 +34,8 @@ cc_library(
cc_library(
jit_function
SRCS function.cc
-  DEPS jit_function_utils jit_executor_engine jit_pe_engine)
+  DEPS jit_function_utils jit_executor_engine jit_pe_engine
+       jit_interpreter_engine)
cc_library(
jit_layer
@@ -46,6 +47,7 @@ cc_library(
jit_function_schema
jit_executor_engine
jit_pe_engine
jit_interpreter_engine
jit_function)
if(WITH_TESTING AND NOT WIN32)
@@ -65,10 +67,19 @@ if(WITH_TESTING AND NOT WIN32)
feed_op
fetch_op
scale_op
transfer_layout_op
jit_layer)
cc_test(
layer_test
SRCS layer_test.cc
DEPS ${JIT_DEPS})
add_dependencies(layer_test jit_download_program)
cc_test(
layer_test_new
SRCS layer_test.cc
DEPS ${JIT_DEPS})
add_dependencies(layer_test_new jit_download_program)
set_tests_properties(layer_test_new PROPERTIES ENVIRONMENT
"FLAGS_jit_engine_type=New")
endif()
@@ -7,3 +7,8 @@ cc_library(
jit_pe_engine
SRCS pe_engine.cc
DEPS parallel_executor)
cc_library(
jit_interpreter_engine
SRCS interpreter_engine.cc
DEPS standalone_executor)
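New file: paddle/fluid/jit/engine/interpreter_engine.cc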
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/engine/interpreter_engine.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/new_executor/interpretercore.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace jit {
InterpreterEngine::InterpreterEngine(const std::shared_ptr<FunctionInfo> &info,
const VariableMap &params_dict,
const phi::Place &place)
: info_(info), place_(place) {
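  // Feed/fetch ops are stripped from the ProgramDesc; inputs and outputs
  // are exchanged through the engine's local scope instead.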
info_->RemoveDescFeedFetch();
PADDLE_ENFORCE_GT(
static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
0,
platform::errors::PreconditionNotMet(
"There is no operator in ProgramDesc."));
utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
CreateInterpreterCore();
}
void InterpreterEngine::CreateInterpreterCore() {
auto &program_desc = info_->ProgramDesc();
// apply inference pass
framework::ir::Graph graph{program_desc};
auto pass =
framework::ir::PassRegistry::Instance().Get("delete_dropout_op_x_pass");
pass->Apply(&graph);
#ifdef PADDLE_WITH_MKLDNN
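  // The placement pass marks ops for MKL-DNN execution; an empty op-type
  // set means every supported op is eligible.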
auto mkldnn_pass =
framework::ir::PassRegistry::Instance().Get("mkldnn_placement_pass");
mkldnn_pass->Set("mkldnn_enabled_op_types",
new std::unordered_set<std::string>({}));
mkldnn_pass->Apply(&graph);
#endif
GraphToProgram(graph, &converted_prog_, nullptr);
auto in_names = info_->InputArgNames();
auto out_names = info_->OutputArgNames();
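  // Inputs and outputs must stay alive across runs, so exclude them from
  // the interpreter's variable garbage collection.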
std::set<std::string> skip_gc_vars;
skip_gc_vars.insert(in_names.begin(), in_names.end());
skip_gc_vars.insert(out_names.begin(), out_names.end());
inner_interpreter_ =
std::make_shared<InterpreterCore>(place_,
converted_prog_.Block(0),
/*skip_gc_vars=*/skip_gc_vars,
&scope_,
/*used_for_jit=*/true);
}
std::vector<Tensor> InterpreterEngine::operator()(
const std::vector<Tensor> &inputs) {
auto dense_tensors = utils::ToDenseTensors(inputs);
return utils::ToTensors(this->operator()(dense_tensors));
}
std::vector<DenseTensor> InterpreterEngine::operator()(
const std::vector<DenseTensor> &inputs) {
utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  // Note: this feed/fetch name handling could be moved to the Python side.
auto &feed_names = info_->InputArgNames();
auto &fetch_names = info_->OutputArgNames();
paddle::framework::FetchList outs = inner_interpreter_->Run(feed_names);
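  // Outputs are read back from the scope rather than from the returned
  // FetchList, since the fetch ops were removed from the program.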
std::vector<DenseTensor> outputs;
utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
scope_.DropKids();
return outputs;
}
const std::shared_ptr<FunctionInfo> &InterpreterEngine::Info() const {
return info_;
}
} // namespace jit
} // namespace paddle
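New file: paddle/fluid/jit/engine/interpreter_engine.h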
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory>
#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace framework {
class InterpreterCore;
} // namespace framework
namespace jit {
using InterpreterCore = framework::InterpreterCore;
class InterpreterEngine : public BaseEngine {
public:
InterpreterEngine(const std::shared_ptr<FunctionInfo> &info,
const VariableMap &params_dict,
const phi::Place &place);
~InterpreterEngine() noexcept {}
void CreateInterpreterCore();
std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);
const std::shared_ptr<FunctionInfo> &Info() const;
private:
std::shared_ptr<FunctionInfo> info_;
framework::Scope scope_;
phi::Place place_;
std::shared_ptr<framework::InterpreterCore> inner_interpreter_;
framework::ProgramDesc converted_prog_;
};
} // namespace jit
} // namespace paddle
@@ -38,6 +38,7 @@ USE_OP_ITSELF(reduce_mean);
USE_OP_ITSELF(feed);
USE_OP_ITSELF(fetch);
USE_OP_ITSELF(scale);
USE_OP_ITSELF(transfer_layout);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(matmul, CPU, ALL_LAYOUT);
......
@@ -21,6 +21,7 @@
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/engine/interpreter_engine.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/property.h"
@@ -79,6 +80,12 @@ Layer Deserializer::operator()(const std::string& path,
VLOG(3) << "Add function type: PEEngine. Function name: " << func_name;
layer.SetEngine(func_name,
utils::MakeEngine<PEEngine>(info, params_dict, place));
} else if (FLAGS_jit_engine_type == "New") {
VLOG(3) << "Add function type: InterpreterEngine. Function name: "
<< func_name;
layer.SetEngine(
func_name,
utils::MakeEngine<InterpreterEngine>(info, params_dict, place));
} else {
PD_THROW("Invalid JitLayer engine type.");
}
......
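To see the new path end to end, a minimal usage sketch follows. This is not part of the commit: the model path and function name are placeholders, and the Load/Function calls mirror the pattern exercised by layer_test.cc.

#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/serializer.h"

// A minimal sketch, assuming a saved model at a hypothetical path.
void RunWithNewEngine() {
  // With FLAGS_jit_engine_type == "New", Deserializer backs each function
  // of the loaded layer with an InterpreterEngine.
  auto layer = paddle::jit::Load("/path/to/model_dir/model", phi::CPUPlace());
  auto forward = layer.Function("forward");
  // Invoking forward(...) shares the inputs into the engine scope, runs
  // the program via InterpreterCore, and fetches the output tensors.
}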
@@ -1011,6 +1011,7 @@ PADDLE_DEFINE_EXPORTED_bool(
* Note:
* FLAGS_jit_engine_type == Executor, using ExecutorEngine by default
* FLAGS_jit_engine_type == PE, using PEEngine by default
* FLAGS_jit_engine_type == New, using InterpreterEngine by default
*/
PADDLE_DEFINE_EXPORTED_string(jit_engine_type,
"PE",
......
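Because the engine is selected when the layer is deserialized, switching to the new executor requires no code changes: setting FLAGS_jit_engine_type=New in the environment is enough, exactly as the layer_test_new target above does.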