机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 7babb3d2 (unverified)
Authored Sep 16, 2021 by Wilber; committed via GitHub on Sep 16, 2021

c and go api support tuned trt. (#35771)

Parent: a3790606
Showing 9 changed files with 228 additions and 9 deletions:
paddle/fluid/inference/capi_exp/pd_config.cc                             +44  -2
paddle/fluid/inference/capi_exp/pd_config.h                              +64  -1
paddle/fluid/inference/goapi/config.go                                   +67  -2
paddle/fluid/inference/goapi/config_test.go                               +1  -1
paddle/fluid/inference/goapi/predictor_test.go                           +48  -0
paddle/fluid/inference/goapi/test.sh                                      +1  -0
paddle/fluid/inference/tests/api/analyzer_capi_exp_gpu_tester.cc          +1  -1
paddle/fluid/inference/tests/api/analyzer_capi_exp_pd_config_tester.cc    +1  -1
paddle/fluid/inference/tests/api/analyzer_capi_exp_xpu_tester.cc          +1  -1
paddle/fluid/inference/capi_exp/pd_config.cc

@@ -231,6 +231,48 @@ void PD_ConfigSetTrtDynamicShapeInfo(__pd_keep PD_Config* pd_config,
                                      optim_input_shapes,
                                      disable_trt_plugin_fp16);
 }
+PD_Bool PD_ConfigTensorRtDynamicShapeEnabled(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->tensorrt_dynamic_shape_enabled();
+}
+void PD_ConfigEnableTunedTensorRtDynamicShape(
+    __pd_keep PD_Config* pd_config, const char* shape_range_info_path,
+    PD_Bool allow_build_at_runtime) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  config->EnableTunedTensorRtDynamicShape(shape_range_info_path,
+                                          allow_build_at_runtime);
+}
+PD_Bool PD_ConfigTunedTensorRtDynamicShape(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->tuned_tensorrt_dynamic_shape();
+}
+PD_Bool PD_ConfigTrtAllowBuildAtRuntime(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->trt_allow_build_at_runtime();
+}
+void PD_ConfigCollectShapeRangeInfo(__pd_keep PD_Config* pd_config,
+                                    const char* shape_range_info_path) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  config->CollectShapeRangeInfo(shape_range_info_path);
+}
+const char* PD_ConfigShapeRangeInfoPath(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  auto shape_str = config->shape_range_info_path();
+  char* c = reinterpret_cast<char*>(malloc(shape_str.length() + 1));
+  snprintf(c, shape_str.length() + 1, "%s", shape_str.c_str());
+  return c;
+}
+PD_Bool PD_ConfigShapeRangeInfoCollected(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->shape_range_info_collected();
+}
 void PD_ConfigDisableTensorRtOPs(__pd_keep PD_Config* pd_config,
                                  size_t ops_num, const char** ops_name) {
   CHECK_AND_CONVERT_PD_CONFIG;

@@ -358,9 +400,9 @@ PD_Bool PD_ConfigModelFromMemory(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
   return config->model_from_memory();
 }
-void PD_ConfigEnableMemoryOptim(__pd_keep PD_Config* pd_config) {
+void PD_ConfigEnableMemoryOptim(__pd_keep PD_Config* pd_config, PD_Bool x) {
   CHECK_AND_CONVERT_PD_CONFIG;
-  config->EnableMemoryOptim();
+  config->EnableMemoryOptim(x);
 }
 PD_Bool PD_ConfigMemoryOptimEnabled(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
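Note the ownership contract in PD_ConfigShapeRangeInfoPath above: the returned string is malloc'ed inside the C API, so it belongs to the caller afterwards. A minimal caller-side sketch of that contract, assuming the existing PD_ConfigCreate/PD_ConfigDestroy helpers from capi_exp; the output path is a placeholder:

#include <stdio.h>
#include <stdlib.h>

#include "pd_config.h"  /* paddle/fluid/inference/capi_exp/pd_config.h */

static void log_shape_range_file(void) {
  PD_Config* config = PD_ConfigCreate();  /* assumed existing capi_exp helper */
  PD_ConfigCollectShapeRangeInfo(config, "shape_range_info.pbtxt");
  if (PD_ConfigShapeRangeInfoCollected(config)) {
    const char* path = PD_ConfigShapeRangeInfoPath(config);
    printf("shape range info will be written to %s\n", path);
    free((void*)path);  /* the API malloc'ed this copy; the caller must release it */
  }
  PD_ConfigDestroy(config);  /* assumed existing capi_exp helper */
}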
paddle/fluid/inference/capi_exp/pd_config.h

@@ -324,6 +324,69 @@ PADDLE_CAPI_EXPORT extern void PD_ConfigSetTrtDynamicShapeInfo(
     size_t* shapes_num, int32_t** min_shape, int32_t** max_shape,
     int32_t** optim_shape, PD_Bool disable_trt_plugin_fp16);
+///
+/// \brief A boolean state telling whether the trt dynamic_shape is used.
+///
+/// \param[in] pd_onfig config
+///
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigTensorRtDynamicShapeEnabled(
+    __pd_keep PD_Config* pd_config);
+///
+/// \brief Enable tuned tensorrt dynamic shape.
+///
+/// \param[in] pd_onfig config
+/// \param[in] shape_range_info_path the path to shape_info file got in
+/// CollectShapeInfo mode.
+/// \param[in] allow_build_at_runtime allow build trt engine at runtime.
+///
+PADDLE_CAPI_EXPORT extern void PD_ConfigEnableTunedTensorRtDynamicShape(
+    __pd_keep PD_Config* pd_config, const char* shape_range_info_path,
+    PD_Bool allow_build_at_runtime);
+///
+/// \brief A boolean state telling whether to use tuned tensorrt dynamic
+/// shape.
+///
+/// \param[in] pd_onfig config
+///
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigTunedTensorRtDynamicShape(
+    __pd_keep PD_Config* pd_config);
+///
+/// \brief A boolean state telling whether to allow building trt engine at
+/// runtime.
+///
+/// \param[in] pd_onfig config
+///
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigTrtAllowBuildAtRuntime(
+    __pd_keep PD_Config* pd_config);
+///
+/// \brief Collect shape info of all tensors in compute graph.
+///
+/// \param[in] pd_onfig config
+/// \param[in] shape_range_info_path the path to save shape info.
+///
+PADDLE_CAPI_EXPORT extern void PD_ConfigCollectShapeRangeInfo(
+    __pd_keep PD_Config* pd_config, const char* shape_range_info_path);
+///
+/// \brief the shape info path in CollectShapeInfo mode.
+/// Attention, Please release the string manually.
+///
+/// \param[in] pd_onfig config
+///
+PADDLE_CAPI_EXPORT extern const char* PD_ConfigShapeRangeInfoPath(
+    __pd_keep PD_Config* pd_config);
+///
+/// \brief A boolean state telling whether to collect shape info.
+///
+/// \param[in] pd_onfig config
+///
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigShapeRangeInfoCollected(
+    __pd_keep PD_Config* pd_config);
 ///
 /// \brief Prevent ops running in Paddle-TRT
 /// NOTE: just experimental, not an official stable API, easy to be broken.
 ///

@@ -542,7 +605,7 @@ PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigModelFromMemory(
 /// \param[in] pd_onfig config
 ///
-PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMemoryOptim(
-    __pd_keep PD_Config* pd_config);
+PADDLE_CAPI_EXPORT extern void PD_ConfigEnableMemoryOptim(
+    __pd_keep PD_Config* pd_config, PD_Bool x);
 ///
 /// \brief A boolean state telling whether the memory optimization is
 /// activated.
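Taken together, the new declarations describe a two-phase "tuned TRT dynamic shape" workflow: run the model once with shape collection enabled, then point a second config at the generated file when the TensorRT engine is built. A minimal sketch under stated assumptions — PD_ConfigCreate, PD_ConfigSetModel and PD_ConfigEnableUseGpu are taken to be the existing capi_exp helpers, the model and output paths are placeholders, and enabling the TensorRT engine itself is left to the existing PD_ConfigEnableTensorRtEngine call:

#include "pd_config.h"  /* paddle/fluid/inference/capi_exp/pd_config.h */

/* Phase 1: record the tensor shape ranges seen at runtime into a file. */
static PD_Config* make_collect_config(void) {
  PD_Config* config = PD_ConfigCreate();
  PD_ConfigSetModel(config, "model.pdmodel", "model.pdiparams");  /* placeholder paths */
  PD_ConfigEnableUseGpu(config, 100, 0);
  PD_ConfigCollectShapeRangeInfo(config, "shape_range_info.pbtxt");
  return config;  /* build a predictor from this config and run it once */
}

/* Phase 2: feed the recorded ranges back as tuned TensorRT dynamic shapes. */
static PD_Config* make_tuned_trt_config(void) {
  PD_Config* config = PD_ConfigCreate();
  PD_ConfigSetModel(config, "model.pdmodel", "model.pdiparams");
  PD_ConfigEnableUseGpu(config, 100, 0);
  /* ... enable the TensorRT engine here via PD_ConfigEnableTensorRtEngine ... */
  PD_ConfigEnableTunedTensorRtDynamicShape(config, "shape_range_info.pbtxt",
                                           /*allow_build_at_runtime=*/1);
  return config;
}

The same flow is exercised end to end in Go by the TestCollectShapeInfo test added in predictor_test.go below.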
paddle/fluid/inference/goapi/config.go

@@ -383,6 +383,71 @@ func (config *Config) SetTRTDynamicShapeInfo(minInputShape map[string][]int32, m
 		cvtGoBoolToPD(disableTrtPluginFp16))
 }
+
+///
+/// \brief A boolean state telling whether the trt dynamic_shape is used.
+///
+func (config *Config) TensorRtDynamicShapeEnabled() bool {
+	return cvtPDBoolToGo(C.PD_ConfigTensorRtDynamicShapeEnabled(config.c))
+}
+
+///
+/// \brief Enable tuned tensorrt dynamic shape.
+///
+/// \param shapeRangeInfoPath the path to shape_info file got in
+/// CollectShapeInfo mode.
+/// \param allowBuildAtRuntime allow build trt engine at runtime.
+///
+func (config *Config) EnableTunedTensorRtDynamicShape(shapeRangeInfoPath string, allowBuildAtRuntime bool) {
+	cstr := C.CString(shapeRangeInfoPath)
+	C.PD_ConfigEnableTunedTensorRtDynamicShape(config.c, cstr, cvtGoBoolToPD(allowBuildAtRuntime))
+	defer C.free(unsafe.Pointer(cstr))
+}
+
+///
+/// \brief A boolean state telling whether to use tuned tensorrt dynamic
+/// shape.
+///
+func (config *Config) TunedTensorRtDynamicShape() bool {
+	return cvtPDBoolToGo(C.PD_ConfigTunedTensorRtDynamicShape(config.c))
+}
+
+///
+/// \brief A boolean state telling whether to allow building trt engine at
+/// runtime.
+///
+func (config *Config) TrtAllowBuildAtRuntime() bool {
+	return cvtPDBoolToGo(C.PD_ConfigTrtAllowBuildAtRuntime(config.c))
+}
+
+///
+/// \brief Collect shape info of all tensors in compute graph.
+///
+/// \param shapeRangeInfoPath the path to save shape info.
+///
+func (config *Config) CollectShapeRangeInfo(shapeRangeInfoPath string) {
+	cstr := C.CString(shapeRangeInfoPath)
+	C.PD_ConfigCollectShapeRangeInfo(config.c, cstr)
+	defer C.free(unsafe.Pointer(cstr))
+}
+
+///
+/// \brief the shape info path in CollectShapeInfo mode.
+/// Attention, Please release the string manually.
+///
+func (config *Config) ShapeRangeInfoPath() string {
+	cstr := C.PD_ConfigShapeRangeInfoPath(config.c)
+	str := C.GoString(cstr)
+	C.free(unsafe.Pointer(cstr))
+	return str
+}
+
+///
+/// \brief A boolean state telling whether to collect shape info.
+///
+func (config *Config) ShapeRangeInfoCollected() bool {
+	return cvtPDBoolToGo(C.PD_ConfigShapeRangeInfoCollected(config.c))
+}
+
 ///
 /// \brief Prevent ops running in Paddle-TRT
 /// NOTE: just experimental, not an official stable API, easy to be broken.

@@ -649,8 +714,8 @@ func (config *Config) ModelFromMemory() bool {
 /// \brief Turn on memory optimize
 /// NOTE still in development.
 ///
-func (config *Config) EnableMemoryOptim() {
-	C.PD_ConfigEnableMemoryOptim(config.c)
+func (config *Config) EnableMemoryOptim(x bool) {
+	C.PD_ConfigEnableMemoryOptim(config.c, cvtGoBoolToPD(x))
 }
 ///
paddle/fluid/inference/goapi/config_test.go

@@ -69,7 +69,7 @@ func TestNewConfig(t *testing.T) {
 	config.EnableMKLDNN()
-	config.EnableMemoryOptim()
+	config.EnableMemoryOptim(true)
 	t.Logf("MemoryOptimEnabled:%+v", config.MemoryOptimEnabled())
 	config.EnableProfile()
paddle/fluid/inference/goapi/predictor_test.go

@@ -17,7 +17,9 @@ package paddle
 import (
 	"io/ioutil"
 	"os"
+	"runtime"
 	"testing"
+	"time"
 )

 func TestNewPredictor(t *testing.T) {

@@ -106,6 +108,52 @@ func TestFromBuffer(t *testing.T) {
 	t.Log(outData)
 }
+
+func TestCollectShapeInfo(t *testing.T) {
+	config := NewConfig()
+	config.SetModel("./mobilenetv1/inference.pdmodel", "./mobilenetv1/inference.pdiparams")
+	config.CollectShapeRangeInfo("shape_range_info.pbtxt")
+	config.EnableUseGpu(100, 0)
+	t.Logf("ShapeRangeInfoCollected:%+v", config.ShapeRangeInfoCollected())
+	t.Logf("ShapeRangeInfoPath:%+v", config.ShapeRangeInfoPath())
+	predictor := NewPredictor(config)
+	inNames := predictor.GetInputNames()
+	outNames := predictor.GetOutputNames()
+	inHandle := predictor.GetInputHandle(inNames[0])
+	inHandle.Reshape([]int32{1, 3, 224, 224})
+	data := make([]float32, numElements([]int32{1, 3, 224, 224}))
+	for i := 0; i < int(numElements([]int32{1, 3, 224, 224})); i++ {
+		data[i] = float32(i%255) * 0.1
+	}
+	inHandle.CopyFromCpu(data)
+	predictor.Run()
+	outHandle := predictor.GetOutputHandle(outNames[0])
+	outShape := outHandle.Shape()
+	outData := make([]float32, numElements(outShape))
+	outHandle.CopyToCpu(outData)
+
+	// Go is a GC language, so we must wait for gc to get shape_range_info.pbtxt
+	predictor = nil
+	runtime.GC()
+	time.Sleep(2 * time.Second)
+
+	trt_config := NewConfig()
+	trt_config.SetModel("./mobilenetv1/inference.pdmodel", "./mobilenetv1/inference.pdiparams")
+	trt_config.EnableUseGpu(100, 0)
+	trt_config.EnableTensorRtEngine(102400, 4, 3, PrecisionFloat32, false, false)
+	trt_config.EnableTunedTensorRtDynamicShape("shape_range_info.pbtxt", true)
+	trt_predictor := NewPredictor(trt_config)
+	trt_inNames := trt_predictor.GetInputNames()
+	trt_inHandle := trt_predictor.GetInputHandle(trt_inNames[0])
+	trt_inHandle.Reshape([]int32{1, 3, 224, 224})
+	trt_inHandle.CopyFromCpu(data)
+	trt_predictor.Run()
+}
+
 func numElements(shape []int32) int32 {
 	n := int32(1)
 	for _, v := range shape {
paddle/fluid/inference/goapi/test.sh

@@ -24,4 +24,5 @@ fi
 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/paddle_inference_c/third_party/install/mklml/lib/:$PWD/paddle_inference_c/third_party/install/mkldnn/lib/:$PWD/paddle_inference_c/paddle/lib/

 # 3. go test
+go clean -testcache
 go test -v ./...
paddle/fluid/inference/tests/api/analyzer_capi_exp_gpu_tester.cc

@@ -120,7 +120,7 @@ TEST(PD_Config, use_gpu) {
                                 FALSE, FALSE);
   bool trt_enable = PD_ConfigTensorRtEngineEnabled(config);
   EXPECT_TRUE(trt_enable);
-  PD_ConfigEnableMemoryOptim(config);
+  PD_ConfigEnableMemoryOptim(config, true);
   bool memory_optim_enable = PD_ConfigMemoryOptimEnabled(config);
   EXPECT_TRUE(memory_optim_enable);
   PD_ConfigEnableProfile(config);
paddle/fluid/inference/tests/api/analyzer_capi_exp_pd_config_tester.cc

@@ -83,7 +83,7 @@ TEST(PD_Config, interface) {
   EXPECT_TRUE(mkldnn_bf16_enabled);
 #endif
-  PD_ConfigEnableMemoryOptim(config);
+  PD_ConfigEnableMemoryOptim(config, true);
   bool memory_enabled = PD_ConfigMemoryOptimEnabled(config);
   EXPECT_TRUE(memory_enabled);
paddle/fluid/inference/tests/api/analyzer_capi_exp_xpu_tester.cc

@@ -42,7 +42,7 @@ TEST(PD_Config, use_xpu) {
   PD_ConfigSwitchIrOptim(config, TRUE);
   bool ir_optim = PD_IrOptim(config);
   EXPECT_TRUE(ir_optim);
-  PD_ConfigEnableMemoryOptim(config);
+  PD_ConfigEnableMemoryOptim(config, true);
   bool memory_optim_enable = PD_ConfigMemoryOptimEnabled(config);
   EXPECT_TRUE(memory_optim_enable);
   PD_ConfigEnableProfile(config);