未验证 提交 5664306b 编写于 作者: N Nyakku Shigure 提交者: GitHub

[Dy2St] remove deprecated JIT engines (#48298)

上级 1623f1b4
......@@ -34,8 +34,7 @@ cc_library(
cc_library(
jit_function
SRCS function.cc
DEPS jit_function_utils jit_executor_engine jit_pe_engine
jit_interpreter_engine jit_predictor_engine)
DEPS jit_function_utils jit_interpreter_engine jit_predictor_engine)
cc_library(
jit_layer
......@@ -45,8 +44,6 @@ cc_library(
jit_serializer_utils
jit_compilation_unit
jit_function_schema
jit_executor_engine
jit_pe_engine
jit_interpreter_engine
jit_predictor_engine
jit_function)
......
# Engine backed by the classic framework::Executor.
cc_library(
  jit_executor_engine
  SRCS executor_engine.cc
  DEPS executor)
# Engine backed by the ParallelExecutor (PE).
cc_library(
  jit_pe_engine
  SRCS pe_engine.cc
  DEPS parallel_executor)
cc_library(
jit_interpreter_engine
SRCS interpreter_engine.cc
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace jit {
// Builds an Executor-backed JIT engine for one function.
//
// `info` describes the function (ProgramDesc, input/output arg names),
// `params_dict` holds the persistent parameters shared into the
// engine-owned scope_, and `place` selects the device the inner
// executor runs on.
//
// NOTE: the init list uses inner_exe_(place_), which relies on place_
// being initialized first — i.e. on member declaration order in the
// class definition.
ExecutorEngine::ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
                               const VariableMap &params_dict,
                               const phi::Place &place)
    : info_(info), place_(place), inner_exe_(place_) {
  // Drop feed/fetch ops from the program; inputs/outputs are shared
  // directly through scope_ instead (see operator()).
  info_->RemoveDescFeedFetch();
  // Fail fast on an empty program rather than silently running a no-op.
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
}
// Tensor-based entry point: converts the inputs to DenseTensors,
// delegates to the DenseTensor overload, and converts the results back.
std::vector<Tensor> ExecutorEngine::operator()(
    const std::vector<Tensor> &inputs) {
  const auto dense_inputs = utils::ToDenseTensors(inputs);
  return utils::ToTensors((*this)(dense_inputs));
}
// Core entry point: binds `inputs` into the engine scope, runs block 0
// of the program with the inner Executor, and fetches the outputs.
std::vector<DenseTensor> ExecutorEngine::operator()(
    const std::vector<DenseTensor> &inputs) {
  // Share input buffers into scope_ under the program's input arg names.
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  const auto fetch_names = info_->OutputArgNames();
  // NOTE(review): the bare bool arguments are presumably
  // create_local_scope=false and create_vars=true, and fetch_names the
  // vars excluded from reference-count GC — confirm against
  // framework::Executor::Run's signature.
  inner_exe_.Run(info_->ProgramDesc(),
                 &scope_,
                 /*blockID=*/0,
                 false,
                 true,
                 fetch_names);
  std::vector<DenseTensor> results;
  utils::FetchOuts(fetch_names, scope_, &results);
  // Erase output vars so a later run cannot rewrite the fetched data.
  scope_.EraseVars(fetch_names);
  return results;
}
// Accessor for the wrapped function's metadata.
const std::shared_ptr<FunctionInfo> &ExecutorEngine::Info() const {
  return info_;
}
} // namespace jit
} // namespace paddle
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace jit {
// JIT engine that runs one jit::Function through the classic
// framework::Executor.
//
// Parameters are shared into the member scope_ once at construction;
// each call shares the inputs in, runs the program, and fetches the
// outputs back out.  Calls mutate scope_, so concurrent invocations on
// one instance are not safe.
class ExecutorEngine : public BaseEngine {
 public:
  ExecutorEngine(const std::shared_ptr<FunctionInfo> &info,
                 const VariableMap &params_dict,
                 const phi::Place &place);

  ~ExecutorEngine() noexcept {}

  // Tensor front end; converts to DenseTensor and back.
  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);

  // Core entry: runs the program against scope_ and returns the outputs.
  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);

  const std::shared_ptr<FunctionInfo> &Info() const;

 private:
  std::shared_ptr<FunctionInfo> info_;
  framework::Scope scope_;  // holds params plus per-call inputs/outputs
  // NOTE: place_ must stay declared before inner_exe_ — the constructor
  // initializes inner_exe_(place_), which depends on declaration order.
  phi::Place place_;
  framework::Executor inner_exe_;
};
} // namespace jit
} // namespace paddle
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace jit {
// Builds the ExecutionStrategy for `place`.
//
// Every supported device (CPU / CUDA / XPU / IPU) gets a single thread:
// per the original note, experiments showed one thread is faster for
// most model training.  Unsupported devices throw.
static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
  ExecutionStrategy execution_strategy;
  const auto device_type = platform::Place2DeviceType(place);
  switch (device_type) {
    case platform::DeviceType::CPU:
    case platform::DeviceType::CUDA:
    case platform::DeviceType::XPU:
    case platform::DeviceType::IPU:
      execution_strategy.num_threads_ = 1;
      break;
    default:
      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
                                                 device_type));
  }
  execution_strategy.use_device_ = device_type;
  return execution_strategy;
}
// Builds a ParallelExecutor-backed JIT engine for one function.
//
// `info` carries the program and argument names, `params_dict` supplies
// the persistent parameters shared into scope_, and `place` selects the
// device.  Construction eagerly lowers the program into a graph and
// creates the inner ParallelExecutor (CreateGraphAndPE).
PEEngine::PEEngine(const std::shared_ptr<FunctionInfo> &info,
                   const VariableMap &params_dict,
                   const phi::Place &place)
    : info_(info), place_(place) {
  // Drop feed/fetch ops; inputs/outputs are shared directly through
  // scope_ instead (see operator()).
  info_->RemoveDescFeedFetch();
  // Fail fast on an empty program rather than silently running a no-op.
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
  CreateGraphAndPE();
}
void PEEngine::CreateGraphAndPE() {
framework::details::BuildStrategy build_strategy;
build_strategy.enable_inference_pass_ = true; // use pe to inference
auto execution_strategy = GetExecutionStrategy(place_);
auto &program_desc = info_->ProgramDesc();
const framework::BlockDesc &global_block = program_desc.Block(0);
int64_t start_op_index = 0;
int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());
graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
inner_pe_ = std::make_shared<ParallelExecutor>(
place_, &scope_, execution_strategy, build_strategy, graph_.get());
inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
}
// Tensor-based entry point: converts the inputs to DenseTensors,
// delegates to the DenseTensor overload, and converts the results back.
std::vector<Tensor> PEEngine::operator()(const std::vector<Tensor> &inputs) {
  const auto dense_inputs = utils::ToDenseTensors(inputs);
  return utils::ToTensors((*this)(dense_inputs));
}
// Core entry point: binds `inputs` into the engine scope, runs the
// graph with the inner ParallelExecutor, and fetches the outputs.
std::vector<DenseTensor> PEEngine::operator()(
    const std::vector<DenseTensor> &inputs) {
  // Share input buffers into scope_ under the program's input arg names.
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  const auto fetch_names = info_->OutputArgNames();
  // Temporary variables must be recreated in the scope before each run
  // (the previous call dropped them below).
  inner_pe_->PrepareVariables(&scope_);
  inner_pe_->RunWithoutFetch(fetch_names);
  std::vector<DenseTensor> results;
  utils::FetchOuts(fetch_names, scope_, &results);
  // Erase output vars so a later run cannot rewrite the fetched data,
  // and drop child scopes created during execution.
  scope_.EraseVars(fetch_names);
  scope_.DropKids();
  return results;
}
// Accessor for the wrapped function's metadata.
const std::shared_ptr<FunctionInfo> &PEEngine::Info() const { return info_; }
} // namespace jit
} // namespace paddle
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <vector>
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
namespace paddle {
namespace framework {
class ParallelExecutor;
namespace details {
class ExecutionStrategy;
}
namespace ir {
class Graph;
}
} // namespace framework
namespace jit {
using ExecutionStrategy = framework::details::ExecutionStrategy;
using ParallelExecutor = framework::ParallelExecutor;
using Graph = framework::ir::Graph;
// JIT engine that runs one jit::Function through the ParallelExecutor.
//
// The ProgramDesc is lowered to an ir::Graph once at construction; each
// call shares inputs into scope_, runs the PE, and fetches the outputs.
// Calls mutate scope_, so concurrent invocations on one instance are
// not safe.
class PEEngine : public BaseEngine {
 public:
  PEEngine(const std::shared_ptr<FunctionInfo> &info,
           const VariableMap &params_dict,
           const phi::Place &place);

  ~PEEngine() noexcept {}

  // Builds graph_ from the ProgramDesc and constructs inner_pe_.
  void CreateGraphAndPE();

  // Tensor front end; converts to DenseTensor and back.
  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);

  // Core entry: runs the graph against scope_ and returns the outputs.
  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);

  const std::shared_ptr<FunctionInfo> &Info() const;

 private:
  std::shared_ptr<FunctionInfo> info_;
  framework::Scope scope_;  // holds params plus per-call inputs/outputs
  phi::Place place_;
  std::shared_ptr<ParallelExecutor> inner_pe_;
  std::shared_ptr<Graph> graph_;  // owned here; inner_pe_ gets a raw ptr
};
} // namespace jit
} // namespace paddle
......@@ -20,9 +20,7 @@
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/jit/engine/executor_engine.h"
#include "paddle/fluid/jit/engine/interpreter_engine.h"
#include "paddle/fluid/jit/engine/pe_engine.h"
#include "paddle/fluid/jit/engine/predictor_engine.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/property.h"
......@@ -74,14 +72,7 @@ Layer Deserializer::operator()(const std::string& path,
auto& info = it->second;
VLOG(3) << "Add function type: " << FLAGS_jit_engine_type
<< " Function name: " << func_name;
if (FLAGS_jit_engine_type == "Executor") {
layer.SetEngine(
func_name,
utils::MakeEngine<ExecutorEngine>(info, params_dict, place));
} else if (FLAGS_jit_engine_type == "PE") {
layer.SetEngine(func_name,
utils::MakeEngine<PEEngine>(info, params_dict, place));
} else if (FLAGS_jit_engine_type == "New") {
if (FLAGS_jit_engine_type == "New") {
layer.SetEngine(
func_name,
utils::MakeEngine<InterpreterEngine>(info, params_dict, place));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册