Unverified commit 882053dc, authored by WangZhen, committed by GitHub

[JitLayer]Move Function classes to a sub dir (#44844)

* Move Function classes to a sub dir

* Format code
Parent c3a2cdcc
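In summary: the function classes (BaseFunction, ExecutorFunction, PEFunction) move from paddle/fluid/jit/ into the new paddle/fluid/jit/function/ subdirectory, their inline method bodies are split out of the headers into .cc files, and all include sites are updated. The resulting include-path change, as it appears throughout the diff below:

```cpp
// Include paths before this commit:
#include "paddle/fluid/jit/base_function.h"
#include "paddle/fluid/jit/executor_function.h"
#include "paddle/fluid/jit/pe_function.h"

// Include paths after this commit:
#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/function/pe_function.h"
```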
+add_subdirectory(function)
proto_library(paddle_jit_property_proto SRCS property.proto)
cc_library(
@@ -33,8 +34,13 @@ cc_library(
cc_library(
  jit_layer
  SRCS layer.cc
-  DEPS jit_serializer jit_function_utils jit_serializer_utils
-       jit_compilation_unit jit_function_schema)
+  DEPS jit_serializer
+       jit_function_utils
+       jit_serializer_utils
+       jit_compilation_unit
+       jit_function_schema
+       jit_executor_function
+       jit_pe_function)
if(WITH_TESTING AND NOT WIN32)
add_custom_target(
......
@@ -14,7 +14,7 @@
#pragma once

-#include "base_function.h"
-#include "layer.h"
-#include "serializer.h"
-#include "serializer_utils.h"
+#include "function/base_function.h"  // NOLINT
+#include "layer.h"                   // NOLINT
+#include "serializer.h"              // NOLINT
+#include "serializer_utils.h"        // NOLINT
@@ -16,7 +16,7 @@
#include "paddle/phi/core/enforce.h"

-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"

namespace paddle {
namespace jit {
......
cc_library(
  jit_executor_function
  SRCS executor_function.cc
  DEPS executor)

cc_library(
  jit_pe_function
  SRCS pe_function.cc
  DEPS parallel_executor)
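The two new targets hold the implementations split out of the headers; the expanded DEPS list of jit_layer above links both, so code that already depends on jit_layer should pick them up transitively.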
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/function/executor_function.h"

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
                                   const Name2VariableMap &params_dict,
                                   const phi::Place &place)
    : info_(info), place_(place), inner_exe_(place_) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
}

std::vector<Tensor> ExecutorFunction::operator()(
    const std::vector<Tensor> &inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> ExecutorFunction::operator()(
    const std::vector<DenseTensor> &inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  inner_exe_.Run(info_->ProgramDesc(),
                 &scope_,
                 /*blockID=*/0,
                 false,
                 true,
                 info_->OutputArgNames());
  std::vector<DenseTensor> outputs;
  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
  return outputs;
}

const std::shared_ptr<FunctionInfo> &ExecutorFunction::Info() const {
  return info_;
}

}  // namespace jit
}  // namespace paddle
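For orientation, a minimal usage sketch of how these function objects are reached through the public JitLayer API. This is an assumption-laden illustration, not part of the commit: paddle::jit::Load comes from serializer.h, and the model path and the function name "forward" are hypothetical.

```cpp
#include <vector>

#include "paddle/fluid/jit/all.h"

// Hypothetical sketch: load a serialized layer and invoke one of its
// functions through the BaseFunction interface. The "/path/to/model"
// prefix and the "forward" name are illustrative assumptions; the object
// behind the pointer may be an ExecutorFunction like the one above.
std::vector<paddle::Tensor> RunForward(
    const std::vector<paddle::Tensor> &inputs) {
  auto layer = paddle::jit::Load("/path/to/model", phi::CPUPlace());
  auto func = layer.Function("forward");  // std::shared_ptr<BaseFunction>
  return (*func)(inputs);                 // virtual dispatch to the engine
}
```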
@@ -14,17 +14,12 @@
#pragma once

#include <iostream>
#include <string>
#include <vector>

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
@@ -35,39 +30,15 @@ class ExecutorFunction : public BaseFunction {
 public:
  ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
                   const Name2VariableMap &params_dict,
-                  const phi::Place &place)
-      : info_(info), place_(place), inner_exe_(place_) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-  }
+                  const phi::Place &place);

  ~ExecutorFunction() noexcept {}

-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);

-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-    inner_exe_.Run(info_->ProgramDesc(),
-                   &scope_,
-                   /*blockID=*/0,
-                   false,
-                   true,
-                   info_->OutputArgNames());
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    return outputs;
-  }
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);

-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
+  const std::shared_ptr<FunctionInfo> &Info() const;

 private:
  std::shared_ptr<FunctionInfo> info_;
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/jit/function/pe_function.h"

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
  ExecutionStrategy execution_strategy;

  auto device_type = platform::Place2DeviceType(place);
  switch (device_type) {
    case platform::DeviceType::CPU: {
      execution_strategy.num_threads_ = 2;
      break;
    }
    case platform::DeviceType::CUDA: {
      // NOTE: According to experiments, one thread is faster in
      // most model training.
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::XPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::IPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    default:
      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
                                                 device_type));
  }
  execution_strategy.use_device_ = device_type;

  return execution_strategy;
}

PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
                       const Name2VariableMap &params_dict,
                       const phi::Place &place)
    : info_(info), place_(place) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
  CreateGraphAndPE();
}

void PEFunction::CreateGraphAndPE() {
  framework::details::BuildStrategy build_strategy;
  auto execution_strategy = GetExecutionStrategy(place_);

  auto &program_desc = info_->ProgramDesc();
  const framework::BlockDesc &global_block = program_desc.Block(0);
  int64_t start_op_index = 0;
  int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

  graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
  inner_pe_ = std::make_shared<ParallelExecutor>(
      place_, &scope_, execution_strategy, build_strategy, graph_.get());
  inner_pe_->PrepareVariables(&scope_);
  inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
}

std::vector<Tensor> PEFunction::operator()(const std::vector<Tensor> &inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> PEFunction::operator()(
    const std::vector<DenseTensor> &inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);

  // update op_handle scope_map in pe->executor_->Graph
  std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
      {inner_pe_->GetLocalScopes().front(), &scope_}};
  inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
  // need to recreate tmp variables in new scope
  inner_pe_->PrepareVariables(&scope_);

  inner_pe_->RunWithoutFetch(info_->OutputArgNames());

  std::vector<DenseTensor> outputs;
  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
  scope_.DropKids();
  return outputs;
}

const std::shared_ptr<FunctionInfo> &PEFunction::Info() const { return info_; }

}  // namespace jit
}  // namespace paddle
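The file above is the ParallelExecutor-backed counterpart of ExecutorFunction: the constructor builds a Graph and a ParallelExecutor once in CreateGraphAndPE(), and each call only rebinds the scope and runs without fetch, whereas ExecutorFunction re-runs the ProgramDesc through a plain framework::Executor on every call. A hedged sketch of what the shared BaseFunction interface buys a call site; the info, params_dict, and place values are assumed to be prepared by the caller:

```cpp
#include <memory>

#include "paddle/fluid/jit/function/executor_function.h"
#include "paddle/fluid/jit/function/pe_function.h"

// Hypothetical factory sketch, not part of this commit: both function
// types implement BaseFunction, so a call site can pick an engine here
// and then treat the returned pointer uniformly.
std::shared_ptr<paddle::jit::BaseFunction> MakeFunction(
    bool use_pe,
    const std::shared_ptr<paddle::jit::FunctionInfo> &info,
    const paddle::jit::Name2VariableMap &params_dict,
    const phi::Place &place) {
  if (use_pe) {
    return std::make_shared<paddle::jit::PEFunction>(info, params_dict, place);
  }
  return std::make_shared<paddle::jit::ExecutorFunction>(
      info, params_dict, place);
}
```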
@@ -14,21 +14,14 @@
#pragma once

#include <iostream>
#include <string>
#include <vector>

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/executor_cache.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"
@@ -43,94 +36,17 @@ class PEFunction : public BaseFunction {
 public:
  PEFunction(const std::shared_ptr<FunctionInfo> &info,
             const Name2VariableMap &params_dict,
-             const phi::Place &place)
-      : info_(info), place_(place) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    CreateGraphAndPE();
-  }
+             const phi::Place &place);

  ~PEFunction() noexcept {}

-  static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
-    ExecutionStrategy execution_strategy;
-
-    auto device_type = platform::Place2DeviceType(place);
-    switch (device_type) {
-      case platform::DeviceType::CPU: {
-        execution_strategy.num_threads_ = 2;
-        break;
-      }
-      case platform::DeviceType::CUDA: {
-        // NOTE: According experiments, one thread is faster in
-        // most model training.
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::XPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::IPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      default:
-        PADDLE_THROW(platform::errors::Unavailable(
-            "Unsupported Device type %d.", device_type));
-    }
-    execution_strategy.use_device_ = device_type;
-
-    return execution_strategy;
-  }
+  void CreateGraphAndPE();

-  void CreateGraphAndPE() {
-    framework::details::BuildStrategy build_strategy;
-    auto execution_strategy = GetExecutionStrategy(place_);
-
-    auto &program_desc = info_->ProgramDesc();
-    const framework::BlockDesc &global_block = program_desc.Block(0);
-    int64_t start_op_index = 0;
-    int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());
-
-    graph_ =
-        std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
-    inner_pe_ = std::make_shared<ParallelExecutor>(
-        place_, &scope_, execution_strategy, build_strategy, graph_.get());
-    inner_pe_->PrepareVariables(&scope_);
-    inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
-  }
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);

-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);

-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-
-    // update op_handle scope_map in pe->executor_->Graph
-    std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
-        {inner_pe_->GetLocalScopes().front(), &scope_}};
-    inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
-    // need to recreate tmp variables in new scope
-    inner_pe_->PrepareVariables(&scope_);
-
-    inner_pe_->RunWithoutFetch(info_->OutputArgNames());
-
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    scope_.DropKids();
-    return outputs;
-  }
+  const std::shared_ptr<FunctionInfo> &Info() const;

-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
-
 private:
  std::shared_ptr<FunctionInfo> info_;
......
@@ -16,8 +16,8 @@
#include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/jit/base_function.h"
#include "paddle/fluid/jit/compilation_unit.h"
+#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/errors.h"
......
@@ -21,7 +21,7 @@
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/common/place.h"

-#include "base_function.h"           // NOLINT
+#include "function/base_function.h"  // NOLINT
namespace paddle {
......
@@ -20,9 +20,9 @@
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/device_context.h"

-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
#include "paddle/fluid/jit/property.h"
#include "paddle/fluid/jit/serializer_utils.h"
......
@@ -22,8 +22,8 @@ limitations under the License. */
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/scope_guard.h"
-#include "paddle/fluid/jit/executor_function.h"
-#include "paddle/fluid/jit/pe_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/operators/py_func_op.h"
#include "paddle/fluid/operators/utils.h"
......
@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
#include "paddle/fluid/eager/hooks.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
......
@@ -18,10 +18,10 @@ limitations under the License. */
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/platform/place.h"

-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
#include "paddle/fluid/jit/serializer.h"

namespace py = pybind11;
......
@@ -630,7 +630,7 @@ headers = (
jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'base_function.h']

for f in jit_layer_headers:
-    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=False))
+    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=True))

if '${WITH_MKLDNN}' == 'ON':
    headers += list(find_files('*', '${MKLDNN_INSTALL_DIR}/include'))  # mkldnn
......
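Note on the setup.py.in change above: part of the packaged jit headers now lives one directory deeper, so find_files switches from recursive=False to recursive=True; this keeps base_function.h in the wheel after its move to the jit/function/ subdirectory.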