Repository: BaiXuePrincess/Paddle (fork of PaddlePaddle/Paddle)

Commit 5664306b (unverified)
Authored by Nyakku Shigure on Nov 24, 2022; committed via GitHub on Nov 24, 2022
Parent: 1623f1b4

[Dy2St] remove deprecated JIT engines (#48298)

Showing 7 changed files with 2 additions and 323 deletions (+2, -323)
paddle/fluid/jit/CMakeLists.txt             +1   -4
paddle/fluid/jit/engine/CMakeLists.txt      +0  -10
paddle/fluid/jit/engine/executor_engine.cc  +0  -66
paddle/fluid/jit/engine/executor_engine.h   +0  -51
paddle/fluid/jit/engine/pe_engine.cc        +0  -115
paddle/fluid/jit/engine/pe_engine.h         +0  -67
paddle/fluid/jit/serializer.cc              +1  -10
paddle/fluid/jit/CMakeLists.txt

@@ -34,8 +34,7 @@ cc_library(
 cc_library(
   jit_function
   SRCS function.cc
-  DEPS jit_function_utils jit_executor_engine jit_pe_engine
-       jit_interpreter_engine jit_predictor_engine)
+  DEPS jit_function_utils jit_interpreter_engine jit_predictor_engine)
 
 cc_library(
   jit_layer
@@ -45,8 +44,6 @@ cc_library(
   jit_serializer_utils
   jit_compilation_unit
   jit_function_schema
-  jit_executor_engine
-  jit_pe_engine
   jit_interpreter_engine
   jit_predictor_engine
   jit_function)
paddle/fluid/jit/engine/CMakeLists.txt

-cc_library(
-  jit_executor_engine
-  SRCS executor_engine.cc
-  DEPS executor)
-
-cc_library(
-  jit_pe_engine
-  SRCS pe_engine.cc
-  DEPS parallel_executor)
-
 cc_library(
   jit_interpreter_engine
   SRCS interpreter_engine.cc
 ...
paddle/fluid/jit/engine/executor_engine.cc (deleted, file mode 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/jit/engine/executor_engine.h"

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

ExecutorEngine::ExecutorEngine(const std::shared_ptr<FunctionInfo>& info,
                               const VariableMap& params_dict,
                               const phi::Place& place)
    : info_(info), place_(place), inner_exe_(place_) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
}

std::vector<Tensor> ExecutorEngine::operator()(
    const std::vector<Tensor>& inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> ExecutorEngine::operator()(
    const std::vector<DenseTensor>& inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  const auto out_names = info_->OutputArgNames();
  inner_exe_.Run(info_->ProgramDesc(),
                 &scope_,
                 /*blockID=*/0,
                 false,
                 true,
                 out_names);
  std::vector<DenseTensor> outputs;
  utils::FetchOuts(out_names, scope_, &outputs);
  // Erase output vars to avoid data rewriting.
  scope_.EraseVars(out_names);
  return outputs;
}

const std::shared_ptr<FunctionInfo>& ExecutorEngine::Info() const {
  return info_;
}

}  // namespace jit
}  // namespace paddle
paddle/fluid/jit/engine/executor_engine.h (deleted, file mode 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <vector>

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"

namespace paddle {
namespace jit {

class ExecutorEngine : public BaseEngine {
 public:
  ExecutorEngine(const std::shared_ptr<FunctionInfo>& info,
                 const VariableMap& params_dict,
                 const phi::Place& place);

  ~ExecutorEngine() noexcept {}

  std::vector<Tensor> operator()(const std::vector<Tensor>& inputs);

  std::vector<DenseTensor> operator()(const std::vector<DenseTensor>& inputs);

  const std::shared_ptr<FunctionInfo>& Info() const;

 private:
  std::shared_ptr<FunctionInfo> info_;
  framework::Scope scope_;
  phi::Place place_;
  framework::Executor inner_exe_;
};

}  // namespace jit
}  // namespace paddle
paddle/fluid/jit/engine/pe_engine.cc (deleted, file mode 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/jit/engine/pe_engine.h"

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/details/execution_strategy.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

static ExecutionStrategy GetExecutionStrategy(const platform::Place& place) {
  ExecutionStrategy execution_strategy;
  auto device_type = platform::Place2DeviceType(place);

  switch (device_type) {
    case platform::DeviceType::CPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::CUDA: {
      // NOTE: According experiments, one thread is faster in
      // most model training.
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::XPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::IPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    default:
      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
                                                 device_type));
  }
  execution_strategy.use_device_ = device_type;

  return execution_strategy;
}

PEEngine::PEEngine(const std::shared_ptr<FunctionInfo>& info,
                   const VariableMap& params_dict,
                   const phi::Place& place)
    : info_(info), place_(place) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
  CreateGraphAndPE();
}

void PEEngine::CreateGraphAndPE() {
  framework::details::BuildStrategy build_strategy;
  build_strategy.enable_inference_pass_ = true;  // use pe to inference
  auto execution_strategy = GetExecutionStrategy(place_);

  auto& program_desc = info_->ProgramDesc();
  const framework::BlockDesc& global_block = program_desc.Block(0);
  int64_t start_op_index = 0;
  int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

  graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
  inner_pe_ = std::make_shared<ParallelExecutor>(
      place_, &scope_, execution_strategy, build_strategy, graph_.get());
  inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
}

std::vector<Tensor> PEEngine::operator()(const std::vector<Tensor>& inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> PEEngine::operator()(
    const std::vector<DenseTensor>& inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  const auto out_names = info_->OutputArgNames();

  // need to recreate tmp variables in new scope
  inner_pe_->PrepareVariables(&scope_);
  inner_pe_->RunWithoutFetch(out_names);

  std::vector<DenseTensor> outputs;
  utils::FetchOuts(out_names, scope_, &outputs);
  // Erase output vars to avoid data rewriting.
  scope_.EraseVars(out_names);
  scope_.DropKids();
  return outputs;
}

const std::shared_ptr<FunctionInfo>& PEEngine::Info() const { return info_; }

}  // namespace jit
}  // namespace paddle
paddle/fluid/jit/engine/pe_engine.h (deleted, file mode 100644 → 0)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <vector>

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/jit/engine/base_engine.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/function_utils.h"

namespace paddle {

namespace framework {
class ParallelExecutor;
namespace details {
class ExecutionStrategy;
}
namespace ir {
class Graph;
}
}  // namespace framework

namespace jit {
using ExecutionStrategy = framework::details::ExecutionStrategy;
using ParallelExecutor = framework::ParallelExecutor;
using Graph = framework::ir::Graph;

class PEEngine : public BaseEngine {
 public:
  PEEngine(const std::shared_ptr<FunctionInfo>& info,
           const VariableMap& params_dict,
           const phi::Place& place);

  ~PEEngine() noexcept {}

  void CreateGraphAndPE();

  std::vector<Tensor> operator()(const std::vector<Tensor>& inputs);

  std::vector<DenseTensor> operator()(const std::vector<DenseTensor>& inputs);

  const std::shared_ptr<FunctionInfo>& Info() const;

 private:
  std::shared_ptr<FunctionInfo> info_;
  framework::Scope scope_;
  phi::Place place_;
  std::shared_ptr<ParallelExecutor> inner_pe_;
  std::shared_ptr<Graph> graph_;
};

}  // namespace jit
}  // namespace paddle
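Both removed classes derive from BaseEngine (declared in paddle/fluid/jit/engine/base_engine.h, which this patch does not touch). Inferred purely from the overrides shown above, the contract they had to satisfy looks roughly like the minimal sketch below; the actual declaration in the tree may differ in details, so treat this as an illustration rather than the real header.

// Sketch of the engine interface implied by the ExecutorEngine and PEEngine
// overrides above; consult paddle/fluid/jit/engine/base_engine.h for the
// authoritative declaration.
class BaseEngine {
 public:
  virtual ~BaseEngine() = default;

  // Run the compiled function on user-facing Tensors ...
  virtual std::vector<Tensor> operator()(const std::vector<Tensor>& inputs) = 0;

  // ... or directly on the underlying DenseTensors.
  virtual std::vector<DenseTensor> operator()(
      const std::vector<DenseTensor>& inputs) = 0;
};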
paddle/fluid/jit/serializer.cc
@@ -20,9 +20,7 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/device_context.h"
 
-#include "paddle/fluid/jit/engine/executor_engine.h"
 #include "paddle/fluid/jit/engine/interpreter_engine.h"
-#include "paddle/fluid/jit/engine/pe_engine.h"
 #include "paddle/fluid/jit/engine/predictor_engine.h"
 #include "paddle/fluid/jit/layer.h"
 #include "paddle/fluid/jit/property.h"
@@ -74,14 +72,7 @@ Layer Deserializer::operator()(const std::string& path,
     auto& info = it->second;
     VLOG(3) << "Add function type: " << FLAGS_jit_engine_type
             << " Function name: " << func_name;
-    if (FLAGS_jit_engine_type == "Executor") {
-      layer.SetEngine(
-          func_name,
-          utils::MakeEngine<ExecutorEngine>(info, params_dict, place));
-    } else if (FLAGS_jit_engine_type == "PE") {
-      layer.SetEngine(func_name,
-                      utils::MakeEngine<PEEngine>(info, params_dict, place));
-    } else if (FLAGS_jit_engine_type == "New") {
+    if (FLAGS_jit_engine_type == "New") {
       layer.SetEngine(
           func_name,
           utils::MakeEngine<InterpreterEngine>(info, params_dict, place));
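With the Executor and PE branches gone, the deserializer can only hand out the interpreter engine (flag value "New") and, judging by the include and CMake dependencies that remain, the predictor engine. As a hedged sketch, assuming only those two engines survive (the remaining code, including the exact flag value for the predictor branch, is not shown in this diff), the simplified selection in Deserializer::operator() presumably reads along these lines:

// Post-patch dispatch sketch -- illustrative only. The "Predictor" flag value
// and the else-if shape are assumptions; check paddle/fluid/jit/serializer.cc
// at this commit for the authoritative code.
if (FLAGS_jit_engine_type == "New") {
  layer.SetEngine(
      func_name,
      utils::MakeEngine<InterpreterEngine>(info, params_dict, place));
} else if (FLAGS_jit_engine_type == "Predictor") {  // assumed flag value
  layer.SetEngine(
      func_name,
      utils::MakeEngine<PredictorEngine>(info, params_dict, place));
}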