机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 882053dc (unverified)
Authored Aug 04, 2022 by WangZhen; committed by GitHub, Aug 04, 2022
[JitLayer]Move Function classes to a sub dir (#44844)

* Move Function classes to a sub dir
* Format code
Parent: c3a2cdcc
Showing 16 changed files with 275 additions and 51 deletions (+275 −51)
paddle/fluid/jit/CMakeLists.txt                 +8   −2
paddle/fluid/jit/all.h                          +4   −4
paddle/fluid/jit/compilation_unit.cc            +1   −1
paddle/fluid/jit/function/CMakeLists.txt        +9   −0
paddle/fluid/jit/function/base_function.h       +0   −0
paddle/fluid/jit/function/executor_function.cc  +63  −0
paddle/fluid/jit/function/executor_function.h   +5   −34
paddle/fluid/jit/function/pe_function.cc        +115 −0
paddle/fluid/jit/function/pe_function.h         +60  −0
paddle/fluid/jit/layer.cc                       +1   −1
paddle/fluid/jit/layer.h                        +1   −1
paddle/fluid/jit/serializer.cc                  +2   −2
paddle/fluid/pybind/eager_utils.cc              +2   −2
paddle/fluid/pybind/eager_utils.h               +1   −1
paddle/fluid/pybind/jit.cc                      +2   −2
python/setup.py.in                              +1   −1
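For downstream code the visible effect of this refactor is an include-path change: the Function headers now live under paddle/fluid/jit/function/. A before/after sketch for a hypothetical consumer (the paths themselves are taken from the hunks below):

    // Before this commit:
    #include "paddle/fluid/jit/base_function.h"
    #include "paddle/fluid/jit/executor_function.h"
    #include "paddle/fluid/jit/pe_function.h"

    // After this commit:
    #include "paddle/fluid/jit/function/base_function.h"
    #include "paddle/fluid/jit/function/executor_function.h"
    #include "paddle/fluid/jit/function/pe_function.h"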
paddle/fluid/jit/CMakeLists.txt

+add_subdirectory(function)
 proto_library(paddle_jit_property_proto SRCS property.proto)
 cc_library(
 ...
@@ -33,8 +34,13 @@ cc_library(
 cc_library(
   jit_layer
   SRCS layer.cc
-  DEPS jit_serializer jit_function_utils jit_serializer_utils
-       jit_compilation_unit jit_function_schema)
+  DEPS jit_serializer
+       jit_function_utils
+       jit_serializer_utils
+       jit_compilation_unit
+       jit_function_schema
+       jit_executor_function
+       jit_pe_function)
 
 if(WITH_TESTING AND NOT WIN32)
   add_custom_target(
 ...
paddle/fluid/jit/all.h

@@ -14,7 +14,7 @@
 #pragma once
 
-#include "base_function.h"
-#include "layer.h"
-#include "serializer.h"
-#include "serializer_utils.h"
+#include "function/base_function.h"  // NOLINT
+#include "layer.h"                   // NOLINT
+#include "serializer.h"              // NOLINT
+#include "serializer_utils.h"        // NOLINT
paddle/fluid/jit/compilation_unit.cc

@@ -16,7 +16,7 @@
 #include "paddle/phi/core/enforce.h"
 
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 
 namespace paddle {
 namespace jit {
 ...
paddle/fluid/jit/function/CMakeLists.txt (new file, mode 100644)

cc_library(
  jit_executor_function
  SRCS executor_function.cc
  DEPS executor)

cc_library(
  jit_pe_function
  SRCS pe_function.cc
  DEPS parallel_executor)
paddle/fluid/jit/base_function.h → paddle/fluid/jit/function/base_function.h (file moved, no content changes)
paddle/fluid/jit/function/executor_function.cc (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/jit/function/executor_function.h"

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
                                   const Name2VariableMap &params_dict,
                                   const phi::Place &place)
    : info_(info), place_(place), inner_exe_(place_) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
}

std::vector<Tensor> ExecutorFunction::operator()(
    const std::vector<Tensor> &inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> ExecutorFunction::operator()(
    const std::vector<DenseTensor> &inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
  inner_exe_.Run(info_->ProgramDesc(),
                 &scope_,
                 /*blockID=*/0,
                 false,
                 true,
                 info_->OutputArgNames());
  std::vector<DenseTensor> outputs;
  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
  return outputs;
}

const std::shared_ptr<FunctionInfo> &ExecutorFunction::Info() const {
  return info_;
}

}  // namespace jit
}  // namespace paddle
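A minimal usage sketch of the class above; info (a std::shared_ptr<FunctionInfo>) and params_dict (a Name2VariableMap) are hypothetical names here and would normally be produced by the deserializer (see serializer.cc below):

    // Construct on CPU; the constructor strips feed/fetch ops, checks that
    // the program is non-empty, and shares parameters into scope_.
    paddle::jit::ExecutorFunction func(info, params_dict, phi::CPUPlace());

    // The Tensor overload converts to DenseTensors, runs the program via
    // the inner framework::Executor, and converts fetched outputs back.
    auto outputs = func({input_tensor});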
paddle/fluid/jit/executor_function.h → paddle/fluid/jit/function/executor_function.h

@@ -14,17 +14,12 @@
 #pragma once
 
 #include <iostream>
 #include <string>
 #include <vector>
 
 #include "paddle/fluid/framework/executor.h"
-#include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/framework/variable.h"
-#include "paddle/phi/core/enforce.h"
 
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/function_utils.h"
 ...
@@ -35,39 +30,15 @@ class ExecutorFunction : public BaseFunction {
  public:
   ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
                    const Name2VariableMap &params_dict,
-                   const phi::Place &place)
-      : info_(info), place_(place), inner_exe_(place_) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-  }
+                   const phi::Place &place);
 
   ~ExecutorFunction() noexcept {}
 
-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
 
-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-    inner_exe_.Run(info_->ProgramDesc(),
-                   &scope_,
-                   /*blockID=*/0,
-                   false,
-                   true,
-                   info_->OutputArgNames());
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    return outputs;
-  }
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);
 
-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
+  const std::shared_ptr<FunctionInfo> &Info() const;
 
  private:
   std::shared_ptr<FunctionInfo> info_;
 ...
paddle/fluid/jit/function/pe_function.cc (new file, mode 100644)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/jit/function/pe_function.h"

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/details/build_strategy.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/phi/core/enforce.h"

namespace paddle {
namespace jit {

static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
  ExecutionStrategy execution_strategy;

  auto device_type = platform::Place2DeviceType(place);
  switch (device_type) {
    case platform::DeviceType::CPU: {
      execution_strategy.num_threads_ = 2;
      break;
    }
    case platform::DeviceType::CUDA: {
      // NOTE: According experiments, one thread is faster in
      // most model training.
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::XPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::IPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    default:
      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
                                                 device_type));
  }
  execution_strategy.use_device_ = device_type;

  return execution_strategy;
}

PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
                       const Name2VariableMap &params_dict,
                       const phi::Place &place)
    : info_(info), place_(place) {
  info_->RemoveDescFeedFetch();
  PADDLE_ENFORCE_GT(
      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
      0,
      platform::errors::PreconditionNotMet(
          "There is no operator in ProgramDesc."));
  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
  CreateGraphAndPE();
}

void PEFunction::CreateGraphAndPE() {
  framework::details::BuildStrategy build_strategy;
  auto execution_strategy = GetExecutionStrategy(place_);

  auto &program_desc = info_->ProgramDesc();
  const framework::BlockDesc &global_block = program_desc.Block(0);
  int64_t start_op_index = 0;
  int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());

  graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
  inner_pe_ = std::make_shared<ParallelExecutor>(
      place_, &scope_, execution_strategy, build_strategy, graph_.get());
  inner_pe_->PrepareVariables(&scope_);
  inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
}

std::vector<Tensor> PEFunction::operator()(const std::vector<Tensor> &inputs) {
  auto dense_tensors = utils::ToDenseTensors(inputs);
  return utils::ToTensors(this->operator()(dense_tensors));
}

std::vector<DenseTensor> PEFunction::operator()(
    const std::vector<DenseTensor> &inputs) {
  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);

  // update op_handle scope_map in pe->executor_->Graph
  std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
      {inner_pe_->GetLocalScopes().front(), &scope_}};
  inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
  // need to recreate tmp variables in new scope
  inner_pe_->PrepareVariables(&scope_);

  inner_pe_->RunWithoutFetch(info_->OutputArgNames());

  std::vector<DenseTensor> outputs;
  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
  scope_.DropKids();
  return outputs;
}

const std::shared_ptr<FunctionInfo> &PEFunction::Info() const {
  return info_;
}

}  // namespace jit
}  // namespace paddle
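PEFunction exposes the same call surface as ExecutorFunction, so callers can hold either through BaseFunction; the difference is that the Graph and ParallelExecutor are built once in the constructor and reused on every call. A sketch under the same hypothetical info/params_dict as the ExecutorFunction example above:

    paddle::jit::PEFunction pe_func(info, params_dict, phi::CPUPlace());
    // Each call rebinds the op-handle scope map to scope_, recreates
    // temporary variables, runs the ParallelExecutor without a fetch
    // phase, then fetches outputs by name and drops child scopes.
    auto outputs = pe_func({input_tensor});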
paddle/fluid/jit/pe_function.h → paddle/fluid/jit/function/pe_function.h

@@ -14,21 +14,14 @@
 #pragma once
 
 #include <iostream>
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/framework/block_desc.h"
-#include "paddle/fluid/framework/details/build_strategy.h"
 #include "paddle/fluid/framework/details/execution_strategy.h"
 #include "paddle/fluid/framework/executor_cache.h"
 #include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/framework/parallel_executor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/phi/core/enforce.h"
 
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/function_utils.h"
 ...
@@ -43,94 +36,17 @@ class PEFunction : public BaseFunction {
  public:
   PEFunction(const std::shared_ptr<FunctionInfo> &info,
              const Name2VariableMap &params_dict,
-             const phi::Place &place)
-      : info_(info), place_(place) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    CreateGraphAndPE();
-  }
+             const phi::Place &place);
 
   ~PEFunction() noexcept {}
 
-  static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
-    ExecutionStrategy execution_strategy;
-
-    auto device_type = platform::Place2DeviceType(place);
-    switch (device_type) {
-      case platform::DeviceType::CPU: {
-        execution_strategy.num_threads_ = 2;
-        break;
-      }
-      case platform::DeviceType::CUDA: {
-        // NOTE: According experiments, one thread is faster in
-        // most model training.
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::XPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::IPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      default:
-        PADDLE_THROW(platform::errors::Unavailable(
-            "Unsupported Device type %d.", device_type));
-    }
-    execution_strategy.use_device_ = device_type;
-
-    return execution_strategy;
-  }
+  void CreateGraphAndPE();
 
-  void CreateGraphAndPE() {
-    framework::details::BuildStrategy build_strategy;
-    auto execution_strategy = GetExecutionStrategy(place_);
-
-    auto &program_desc = info_->ProgramDesc();
-    const framework::BlockDesc &global_block = program_desc.Block(0);
-    int64_t start_op_index = 0;
-    int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());
-
-    graph_ =
-        std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
-    inner_pe_ = std::make_shared<ParallelExecutor>(
-        place_, &scope_, execution_strategy, build_strategy, graph_.get());
-    inner_pe_->PrepareVariables(&scope_);
-    inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
-  }
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
 
-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);
 
-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-    // update op_handle scope_map in pe->executor_->Graph
-    std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
-        {inner_pe_->GetLocalScopes().front(), &scope_}};
-    inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
-    // need to recreate tmp variables in new scope
-    inner_pe_->PrepareVariables(&scope_);
-    inner_pe_->RunWithoutFetch(info_->OutputArgNames());
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    scope_.DropKids();
-    return outputs;
-  }
+  const std::shared_ptr<FunctionInfo> &Info() const;
 
-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
-
  private:
   std::shared_ptr<FunctionInfo> info_;
 ...
paddle/fluid/jit/layer.cc

@@ -16,8 +16,8 @@
 #include "paddle/fluid/framework/variable.h"
 
-#include "paddle/fluid/jit/base_function.h"
 #include "paddle/fluid/jit/compilation_unit.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/errors.h"
 ...
paddle/fluid/jit/layer.h

@@ -21,7 +21,7 @@
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/place.h"
 
-#include "base_function.h"  //NOLINT
+#include "function/base_function.h"  //NOLINT
 
 namespace paddle {
 ...
paddle/fluid/jit/serializer.cc

@@ -20,9 +20,9 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/device_context.h"
 
-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
 #include "paddle/fluid/jit/property.h"
 #include "paddle/fluid/jit/serializer_utils.h"
 ...
paddle/fluid/pybind/eager_utils.cc

@@ -22,8 +22,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/scope_guard.h"
-#include "paddle/fluid/jit/executor_function.h"
-#include "paddle/fluid/jit/pe_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/operators/py_func_op.h"
 #include "paddle/fluid/operators/utils.h"
 ...
paddle/fluid/pybind/eager_utils.h

@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
 ...
paddle/fluid/pybind/jit.cc

@@ -18,10 +18,10 @@ limitations under the License. */
 #include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/platform/place.h"
 
-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
 #include "paddle/fluid/jit/serializer.h"
 
 namespace py = pybind11;
 ...
python/setup.py.in

@@ -630,7 +630,7 @@ headers = (
 jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'base_function.h']
 
 for f in jit_layer_headers:
-    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=False))
+    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=True))
 
 if '${WITH_MKLDNN}' == 'ON':
     headers += list(find_files('*', '${MKLDNN_INSTALL_DIR}/include')) # mkldnn
 ...