Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit fe827540 (unverified)
Authored on Jul 05, 2021 by Wilber; committed by GitHub on Jul 05, 2021
cherry-pick prs. (#33932)
Parent: 16ed3cc9
Showing 4 changed files with 25 additions and 6 deletions (+25 −6)
cmake/inference_lib.cmake  (+1 −1)
cmake/tensorrt.cmake  (+19 −1)
paddle/fluid/inference/api/paddle_analysis_config.h  (+1 −1)
tools/remove_grad_op_and_kernel.py  (+4 −3)
cmake/inference_lib.cmake
@@ -345,7 +345,7 @@ function(version version_file)
       "CXX compiler version: ${CMAKE_CXX_COMPILER_VERSION}\n")
   if(TENSORRT_FOUND)
-    file(APPEND ${version_file} "WITH_TENSORRT: ${TENSORRT_FOUND}\n" "TensorRT version: v${TENSORRT_MAJOR_VERSION}\n")
+    file(APPEND ${version_file} "WITH_TENSORRT: ${TENSORRT_FOUND}\n" "TensorRT version: v${TENSORRT_MAJOR_VERSION}.${TENSORRT_MINOR_VERSION}.${TENSORRT_PATCH_VERSION}.${TENSORRT_BUILD_VERSION}\n")
   endif()
   if(WITH_LITE)
     file(APPEND ${version_file} "WITH_LITE: ${WITH_LITE}\n" "LITE_GIT_TAG: ${LITE_GIT_TAG}\n")
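After this hunk the generated version record reports the full TensorRT version instead of only the major number. A minimal standalone sketch of the resulting line (runnable with `cmake -P`), using hypothetical version components rather than values detected by Paddle's build:

# sketch only, not part of Paddle's build; 7.2.3.4 is a placeholder for whatever
# tensorrt.cmake detects on the build machine.
set(TENSORRT_MAJOR_VERSION 7)
set(TENSORRT_MINOR_VERSION 2)
set(TENSORRT_PATCH_VERSION 3)
set(TENSORRT_BUILD_VERSION 4)
set(version_file "version_demo.txt")
file(APPEND ${version_file}
  "WITH_TENSORRT: ON\n"
  "TensorRT version: v${TENSORRT_MAJOR_VERSION}.${TENSORRT_MINOR_VERSION}.${TENSORRT_PATCH_VERSION}.${TENSORRT_BUILD_VERSION}\n")
# version_demo.txt now ends with: TensorRT version: v7.2.3.4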
cmake/tensorrt.cmake
@@ -47,11 +47,23 @@ if(TENSORRT_FOUND)
   file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
   string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
     "${TENSORRT_VERSION_FILE_CONTENTS}")
+  string(REGEX MATCH "define NV_TENSORRT_MINOR +([0-9]+)" TENSORRT_MINOR_VERSION
+    "${TENSORRT_VERSION_FILE_CONTENTS}")
+  string(REGEX MATCH "define NV_TENSORRT_PATCH +([0-9]+)" TENSORRT_PATCH_VERSION
+    "${TENSORRT_VERSION_FILE_CONTENTS}")
+  string(REGEX MATCH "define NV_TENSORRT_BUILD +([0-9]+)" TENSORRT_BUILD_VERSION
+    "${TENSORRT_VERSION_FILE_CONTENTS}")
   if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
     file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
     string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
       "${TENSORRT_VERSION_FILE_CONTENTS}")
+    string(REGEX MATCH "define NV_TENSORRT_MINOR +([0-9]+)" TENSORRT_MINOR_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
+    string(REGEX MATCH "define NV_TENSORRT_PATCH +([0-9]+)" TENSORRT_PATCH_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
+    string(REGEX MATCH "define NV_TENSORRT_BUILD +([0-9]+)" TENSORRT_BUILD_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
   endif()
   if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
@@ -60,9 +72,15 @@ if(TENSORRT_FOUND)
   string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
     TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+  string(REGEX REPLACE "define NV_TENSORRT_MINOR +([0-9]+)" "\\1"
+    TENSORRT_MINOR_VERSION "${TENSORRT_MINOR_VERSION}")
+  string(REGEX REPLACE "define NV_TENSORRT_PATCH +([0-9]+)" "\\1"
+    TENSORRT_PATCH_VERSION "${TENSORRT_PATCH_VERSION}")
+  string(REGEX REPLACE "define NV_TENSORRT_BUILD +([0-9]+)" "\\1"
+    TENSORRT_BUILD_VERSION "${TENSORRT_BUILD_VERSION}")
   message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
-    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}.${TENSORRT_MINOR_VERSION}.${TENSORRT_PATCH_VERSION}.${TENSORRT_BUILD_VERSION}")
   include_directories(${TENSORRT_INCLUDE_DIR})
   link_directories(${TENSORRT_LIBRARY})
   add_definitions(-DPADDLE_WITH_TENSORRT)
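The new minor/patch/build variables are extracted the same way the major version already was: a REGEX MATCH that grabs the whole "define NV_TENSORRT_*" line, followed by a REGEX REPLACE that keeps only the captured number. A self-contained sketch of that two-step extraction (runnable with `cmake -P`); the header text below is a hypothetical stand-in for NvInfer.h / NvInferVersion.h, not copied from any TensorRT release:

# hypothetical header contents for illustration only
set(TENSORRT_VERSION_FILE_CONTENTS
  "#define NV_TENSORRT_MAJOR 7\n#define NV_TENSORRT_MINOR 1\n#define NV_TENSORRT_PATCH 3\n#define NV_TENSORRT_BUILD 4\n")
# Step 1: REGEX MATCH stores the whole matching line, e.g. "define NV_TENSORRT_MINOR 1".
string(REGEX MATCH "define NV_TENSORRT_MINOR +([0-9]+)" TENSORRT_MINOR_VERSION
  "${TENSORRT_VERSION_FILE_CONTENTS}")
# Step 2: REGEX REPLACE keeps only the captured number via the back-reference "\\1".
string(REGEX REPLACE "define NV_TENSORRT_MINOR +([0-9]+)" "\\1"
  TENSORRT_MINOR_VERSION "${TENSORRT_MINOR_VERSION}")
message(STATUS "Extracted NV_TENSORRT_MINOR = ${TENSORRT_MINOR_VERSION}")  # prints 1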
paddle/fluid/inference/api/paddle_analysis_config.h
@@ -678,7 +678,7 @@ struct PD_INFER_DECL AnalysisConfig {
   bool xpu_adaptive_seqlen_;

   // mkldnn related.
-  int mkldnn_cache_capacity_{0};
+  int mkldnn_cache_capacity_{10};
   bool use_mkldnn_quantizer_{false};
   std::shared_ptr<MkldnnQuantizerConfig> mkldnn_quantizer_config_;
   bool use_mkldnn_bfloat16_{false};
tools/remove_grad_op_and_kernel.py
@@ -20,6 +20,7 @@ import os
 import sys
 import re
 import glob
+import io


 def find_type_files(cur_dir, file_type, file_list=[]):
@@ -124,7 +125,7 @@ if __name__ == '__main__':
        custom_pattern2 = custom_pattern2[:-1]

        all_matches = []
-        with open(op_file, 'r') as f:
+        with io.open(op_file, 'r', encoding='utf-8') as f:
            content = ''.join(f.readlines())

            op, op_count = remove_grad_op_and_kernel(content, op_pattern1,
@@ -157,8 +158,8 @@ if __name__ == '__main__':
        for i in all_matches:
            content = content.replace(i, '')

-        with open(op_file, 'w') as f:
-            f.write(content)
+        with io.open(op_file, 'w', encoding='utf-8') as f:
+            f.write(u'{}'.format(content))

    # 2. update operators/CMakeLists.txt
    cmake_file = os.path.join(tool_dir,