Commit 68e0560c (unverified)
Authored on Aug 28, 2020 by Wilber; committed via GitHub on Aug 28, 2020.

refine paddle inference api (#26774)

* refine paddle inference api

Co-authored-by: nhzlx <nhzlx.dragon@gmail.com>

Parent: 4106e54c
Showing 11 changed files with 435 additions and 10 deletions (+435 −10).
paddle/fluid/inference/api/analysis_config.cc                +2    -3
paddle/fluid/inference/api/analysis_predictor.cc             +121  -1
paddle/fluid/inference/api/api.cc                            +6    -0
paddle/fluid/inference/api/api_impl.cc                       +4    -0
paddle/fluid/inference/api/paddle_api.h                      +3    -3
paddle/fluid/inference/api/paddle_inference_api.h            +115  -0
paddle/fluid/inference/tests/api/CMakeLists.txt              +6    -0
paddle/fluid/inference/tests/api/lite_resnet50_test.cc       +56   -0
paddle/fluid/inference/tests/api/paddle_infer_api_test.cc    +95   -0
paddle/fluid/inference/tests/api/trt_mobilenet_test.cc       +25   -1
paddle/fluid/pybind/inference_api.cc                         +2    -2
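All of the new functionality lives in the paddle_infer namespace declared in paddle_inference_api.h and implemented in analysis_predictor.cc. The snippet below is an editorial sketch of the resulting end-to-end usage, assembled from the tests added in this commit; the model paths, input shape, and GPU memory pool size (100 MB on device 0) are illustrative assumptions, not part of the diff.

// Minimal sketch of the paddle_infer API added by this commit.
// Model paths and shapes are placeholders.
#include <numeric>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle_infer::Config config;  // alias for paddle::AnalysisConfig
  config.SetModel("./resnet50/model", "./resnet50/params");  // hypothetical paths
  config.EnableUseGpu(100, 0);

  auto predictor = paddle_infer::CreatePredictor(config);

  // Feed a single zero-filled image-shaped input.
  std::vector<int> in_shape = {1, 3, 318, 318};
  std::vector<float> input(1 * 3 * 318 * 318, 0.f);
  auto input_t = predictor->GetInputHandle(predictor->GetInputNames()[0]);
  input_t->Reshape(in_shape);
  input_t->CopyFromCpu(input.data());

  predictor->Run();

  // Fetch the first output back to host memory.
  auto output_t = predictor->GetOutputHandle(predictor->GetOutputNames()[0]);
  auto out_shape = output_t->shape();
  int out_num = std::accumulate(out_shape.begin(), out_shape.end(), 1,
                                std::multiplies<int>());
  std::vector<float> out(out_num);
  output_t->CopyToCpu(out.data());
  return 0;
}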
paddle/fluid/inference/api/analysis_config.cc

@@ -15,7 +15,6 @@
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_pass_builder.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/gpu_info.h"

@@ -103,8 +102,8 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
   // params_file_ fields.
   CP_MEMBER(opt_cache_dir_);
-  prog_file_ = std::move(other.prog_file_);
-  params_file_ = std::move(other.params_file_);
+  CP_MEMBER(prog_file_);
+  CP_MEMBER(params_file_);
   CP_MEMBER(use_fc_padding_);
   // GPU related.
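The copy constructor now copies prog_file_ and params_file_ with CP_MEMBER instead of moving them out of the source config, so a config can be copied repeatedly (for example, once per predictor in the new PredictorPool further below) without losing its model paths. A minimal sketch of the CP_MEMBER copy pattern, using a hypothetical DemoConfig rather than the real AnalysisConfig, looks like this:

// Hedged sketch of the CP_MEMBER copy pattern; DemoConfig is hypothetical.
#include <string>

#define CP_MEMBER(member__) member__ = other.member__;

struct DemoConfig {
  std::string prog_file_;
  std::string params_file_;

  DemoConfig() = default;
  DemoConfig(const DemoConfig &other) {
    // Copy, do not move: `other` must stay fully usable after being copied.
    CP_MEMBER(prog_file_);
    CP_MEMBER(params_file_);
  }
};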
paddle/fluid/inference/api/analysis_predictor.cc

@@ -32,7 +32,6 @@
 #include "paddle/fluid/inference/analysis/helper.h"
 #include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
 #include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/inference/utils/singleton.h"
 #include "paddle/fluid/memory/memcpy.h"

@@ -517,6 +516,8 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
 template <>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) {
+  // TODO(NHZlX): Should add the link to the doc of
+  // paddle_infer::CreatePredictor<paddle_infer::Config>
   if (config.glog_info_disabled()) {
     FLAGS_logtostderr = 1;
     FLAGS_minloglevel = 2;  // GLOG_ERROR

@@ -1058,3 +1059,122 @@ USE_TRT_CONVERTER(skip_layernorm);
 USE_TRT_CONVERTER(slice);
 USE_TRT_CONVERTER(scale);
 #endif
+
+namespace paddle_infer {
+
+void Tensor::Reshape(const std::vector<int> &shape) { tensor_->Reshape(shape); }
+
+std::vector<int> Tensor::shape() const { return tensor_->shape(); }
+
+void Tensor::SetLoD(const std::vector<std::vector<size_t>> &x) {
+  return tensor_->SetLoD(x);
+}
+
+std::vector<std::vector<size_t>> Tensor::lod() const { return tensor_->lod(); }
+
+const std::string &Tensor::name() const { return tensor_->name(); }
+
+DataType Tensor::type() const { return tensor_->type(); }
+
+Predictor::Predictor(const Config &config) {
+  const_cast<Config *>(&config)->SwitchUseFeedFetchOps(false);
+  // The second parameter indicates that the discard log is not printed
+  predictor_ = paddle::CreatePaddlePredictor<
+      Config, paddle::PaddleEngineKind::kAnalysis>(config);
+}
+
+std::vector<std::string> Predictor::GetInputNames() {
+  return predictor_->GetInputNames();
+}
+
+std::unique_ptr<Tensor> Predictor::GetInputHandle(const std::string &name) {
+  auto zero_copy_tensor = predictor_->GetInputTensor(name);
+  std::unique_ptr<Tensor> tensor(new Tensor(std::move(zero_copy_tensor)));
+  return tensor;
+}
+
+std::vector<std::string> Predictor::GetOutputNames() {
+  return predictor_->GetOutputNames();
+}
+
+std::unique_ptr<Tensor> Predictor::GetOutputHandle(const std::string &name) {
+  auto zero_copy_tensor = predictor_->GetOutputTensor(name);
+  std::unique_ptr<Tensor> tensor(new Tensor(std::move(zero_copy_tensor)));
+  return tensor;
+}
+
+bool Predictor::Run() { return predictor_->ZeroCopyRun(); }
+
+std::unique_ptr<Predictor> Predictor::Clone() {
+  auto analysis_pred = predictor_->Clone();
+  std::unique_ptr<Predictor> pred(new Predictor(std::move(analysis_pred)));
+  return pred;
+}
+
+void Predictor::ClearIntermediateTensor() {
+  predictor_->ClearIntermediateTensor();
+}
+
+int GetNumBytesOfDataType(DataType dtype) {
+  switch (dtype) {
+    case DataType::FLOAT32:
+      return sizeof(float);
+    case DataType::INT64:
+      return sizeof(int64_t);
+    case DataType::INT32:
+      return sizeof(int32_t);
+    case DataType::UINT8:
+      return sizeof(uint8_t);
+    default:
+      assert(false);
+      return -1;
+  }
+}
+
+std::string GetVersion() { return paddle::get_version(); }
+
+std::string UpdateDllFlag(const char *name, const char *value) {
+  return paddle::UpdateDllFlag(name, value);
+}
+
+}  // namespace paddle_infer
+
+namespace paddle_infer {
+std::shared_ptr<Predictor> CreatePredictor(const Config &config) {  // NOLINT
+  std::shared_ptr<Predictor> predictor(new Predictor(config));
+  return predictor;
+}
+
+namespace services {
+PredictorPool::PredictorPool(const Config &config, size_t size) {
+  PADDLE_ENFORCE_GE(
+      size, 1UL,
+      paddle::platform::errors::InvalidArgument(
+          "The predictor pool size should be greater than 1, but it's (%d)",
+          size));
+  Config copy_config(config);
+  main_pred_.reset(new Predictor(config));
+  for (size_t i = 0; i < size - 1; i++) {
+    if (config.tensorrt_engine_enabled()) {
+      Config config_tmp(copy_config);
+      preds_.push_back(
+          std::move(std::unique_ptr<Predictor>(new Predictor(config_tmp))));
+    } else {
+      preds_.push_back(std::move(main_pred_->Clone()));
+    }
+  }
+}
+
+Predictor *PredictorPool::Retrive(size_t idx) {
+  PADDLE_ENFORCE_LT(
+      idx, preds_.size() + 1,
+      paddle::platform::errors::InvalidArgument(
+          "There are (%d) predictors in the pool, but the idx is (%d)", idx,
+          preds_.size() + 1));
+  if (idx == 0) {
+    return main_pred_.get();
+  }
+  return preds_[idx - 1].get();
+}
+}  // namespace services
+}  // namespace paddle_infer
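As the implementation above shows, paddle_infer::Predictor is a thin wrapper over the existing zero-copy path: the constructor forces SwitchUseFeedFetchOps(false), the handles wrap ZeroCopyTensor, and Run() simply calls ZeroCopyRun(). For comparison, a hedged sketch of the equivalent code written directly against the older API that the wrapper delegates to (paths are placeholders):

// Sketch of what the new wrapper delegates to: the pre-existing zero-copy API.
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void RunWithLegacyZeroCopyApi() {
  paddle::AnalysisConfig config;
  config.SetModel("./model/model", "./model/params");  // hypothetical paths
  config.SwitchUseFeedFetchOps(false);  // required for the zero-copy path

  auto predictor = paddle::CreatePaddlePredictor<
      paddle::AnalysisConfig, paddle::PaddleEngineKind::kAnalysis>(config);

  auto names = predictor->GetInputNames();
  auto input = predictor->GetInputTensor(names[0]);  // ZeroCopyTensor handle
  input->Reshape({1, 3, 318, 318});
  // ... fill the tensor via input->copy_from_cpu<float>(...) ...

  predictor->ZeroCopyRun();  // what paddle_infer::Predictor::Run() forwards to
}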
paddle/fluid/inference/api/api.cc

@@ -112,6 +112,12 @@ void PaddleBuf::Free() {
   }
 }

+NativeConfig::NativeConfig() {
+  LOG(WARNING) << "The paddle::NativeConfig interface is going to be "
+                  "deprecated in the next release, plase use the latest "
+                  "paddle_infer::Config instead.";
+}
+
 std::string get_version() {
   std::stringstream ss;
   ss << "version: " << framework::paddle_version() << "\n";
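The new NativeConfig constructor only logs a deprecation warning; behavior is otherwise unchanged. A hedged migration sketch from the deprecated paddle::NativeConfig to the paddle_infer::Config introduced by this commit (the fields and paths shown are illustrative):

#include "paddle/fluid/inference/api/paddle_inference_api.h"

void MigrateToPaddleInfer() {
  // Before: the deprecated native config (constructing it now logs a warning).
  paddle::NativeConfig native_config;
  native_config.model_dir = "./model";  // hypothetical path
  auto old_pred = paddle::CreatePaddlePredictor<
      paddle::NativeConfig, paddle::PaddleEngineKind::kNative>(native_config);

  // After: the paddle_infer API this commit introduces.
  paddle_infer::Config config;
  config.SetModel("./model/model", "./model/params");  // hypothetical paths
  auto new_pred = paddle_infer::CreatePredictor(config);
}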
paddle/fluid/inference/api/api_impl.cc

@@ -15,6 +15,7 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <algorithm>
 #include <map>
+#include <memory>
 #include <set>
 #include <sstream>
 #include <string>

@@ -25,6 +26,7 @@ limitations under the License. */
 #include "paddle/fluid/inference/api/api_impl.h"
 #include "paddle/fluid/inference/api/details/reset_tensor_array.h"
 #include "paddle/fluid/inference/api/helper.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/platform/cpu_helper.h"
 #include "paddle/fluid/platform/profiler.h"

@@ -311,6 +313,8 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
 template <>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
+  // TODO(NHZlX): Should add the link to the doc of
+  // paddle_infer::CreatePredictor<paddle_infer::Config>
   VLOG(3) << "create NativePaddlePredictor";
   if (config.use_gpu) {
     // 1. GPU memory
paddle/fluid/inference/api/paddle_api.h

@@ -347,6 +347,7 @@ class PD_INFER_DECL PaddlePredictor {
 /// place of inference, etc.)
 ///
 struct PD_INFER_DECL NativeConfig : public PaddlePredictor::Config {
+  NativeConfig();
   /// GPU related fields.
   bool use_gpu{false};
   int device{0};

@@ -421,7 +422,8 @@ enum class PaddleEngineKind {
 };

 template <typename ConfigT, PaddleEngineKind engine>
-std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
+PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
+    const ConfigT& config);

 template <>
 PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<

@@ -437,6 +439,4 @@ PD_INFER_DECL std::string get_version();
 PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
-PD_INFER_DECL std::shared_ptr<framework::Cipher> MakeCipher(
-    const std::string& config_file);

 }  // namespace paddle
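PD_INFER_DECL is the export macro for the inference shared library; attaching it to the CreatePaddlePredictor template declaration keeps the symbol visible to library users. Its actual definition is not part of this diff; the sketch below is only the typical shape of such a macro, with a hypothetical build flag, and should not be read as Paddle's real definition.

// Hedged sketch of a typical shared-library export macro; NOT Paddle's definition.
#if defined(_WIN32)
#ifdef BUILDING_INFERENCE_DLL  // hypothetical "building the DLL" flag
#define PD_INFER_DECL __declspec(dllexport)
#else
#define PD_INFER_DECL __declspec(dllimport)
#endif
#else
#define PD_INFER_DECL __attribute__((visibility("default")))
#endif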
paddle/fluid/inference/api/paddle_inference_api.h

@@ -22,9 +22,124 @@ limitations under the License. */
 #pragma once

 #include <cassert>
 #include <map>
+#include <memory>
 #include <string>
+#include <utility>
 #include <vector>

 #include "paddle_analysis_config.h"  // NOLINT
 #include "paddle_api.h"              // NOLINT
+
+namespace paddle_infer {
+using DataType = paddle::PaddleDType;
+using PlaceType = paddle::PaddlePlace;
+using PrecisionType = paddle::AnalysisConfig::Precision;
+using Config = paddle::AnalysisConfig;
+
+class PD_INFER_DECL Tensor {
+ public:
+  // Can only be created by predictor->GetInputHandle(cosnt std::string& name)
+  // or predictor->GetOutputHandle(cosnt std::string& name)
+  Tensor() = delete;
+  explicit Tensor(std::unique_ptr<paddle::ZeroCopyTensor>&& tensor)
+      : tensor_(std::move(tensor)) {}
+  void Reshape(const std::vector<int>& shape);
+
+  template <typename T>
+  void CopyFromCpu(const T* data);
+
+  // should add the place
+  template <typename T>
+  T* mutable_data(PlaceType place);
+
+  template <typename T>
+  void CopyToCpu(T* data);
+
+  template <typename T>
+  T* data(PlaceType* place, int* size) const;
+
+  void SetLoD(const std::vector<std::vector<size_t>>& x);
+  std::vector<std::vector<size_t>> lod() const;
+
+  DataType type() const;
+
+  std::vector<int> shape() const;
+  const std::string& name() const;
+
+ private:
+  std::unique_ptr<paddle::ZeroCopyTensor> tensor_;
+};
+
+class PD_INFER_DECL Predictor {
+ public:
+  Predictor() = default;
+  ~Predictor() {}
+  // Use for clone
+  explicit Predictor(std::unique_ptr<paddle::PaddlePredictor>&& pred)
+      : predictor_(std::move(pred)) {}
+
+  explicit Predictor(const Config& config);
+
+  std::vector<std::string> GetInputNames();
+  std::unique_ptr<Tensor> GetInputHandle(const std::string& name);
+
+  bool Run();
+
+  std::vector<std::string> GetOutputNames();
+  std::unique_ptr<Tensor> GetOutputHandle(const std::string& name);
+
+  std::unique_ptr<Predictor> Clone();
+  void ClearIntermediateTensor();
+
+ private:
+  std::unique_ptr<paddle::PaddlePredictor> predictor_;
+};
+
+PD_INFER_DECL std::shared_ptr<Predictor> CreatePredictor(
+    const Config& config);  // NOLINT
+PD_INFER_DECL int GetNumBytesOfDataType(DataType dtype);
+
+PD_INFER_DECL std::string GetVersion();
+PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
+
+template <typename T>
+void Tensor::CopyFromCpu(const T* data) {
+  tensor_->copy_from_cpu<T>(data);
+}
+
+template <typename T>
+void Tensor::CopyToCpu(T* data) {
+  return tensor_->copy_to_cpu<T>(data);
+}
+
+template <typename T>
+T* Tensor::mutable_data(PlaceType place) {
+  return tensor_->mutable_data<T>(place);
+}
+
+template <typename T>
+T* Tensor::data(PlaceType* place, int* size) const {
+  return tensor_->data<T>(place, size);
+}
+
+}  // namespace paddle_infer
+
+namespace paddle_infer {
+namespace services {
+
+class PD_INFER_DECL PredictorPool {
+ public:
+  PredictorPool() = delete;
+  PredictorPool(const PredictorPool&) = delete;
+  PredictorPool& operator=(const PredictorPool&) = delete;
+
+  explicit PredictorPool(const Config& config, size_t size = 1);
+  Predictor* Retrive(size_t idx);
+
+ private:
+  std::shared_ptr<Predictor> main_pred_;
+  std::vector<std::unique_ptr<Predictor>> preds_;
+};
+}  // namespace services
+}  // namespace paddle_infer
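The Tensor template helpers declared above simply forward to ZeroCopyTensor. One pattern they enable is sizing a host buffer from shape() and type() before copying an output back; a minimal hedged sketch, assuming a predictor obtained as in the tests added by this commit and a FLOAT32 output:

// Minimal sketch: size a host buffer from shape()/type(), then copy the output.
#include <numeric>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void FetchFirstOutput(paddle_infer::Predictor* predictor) {
  auto names = predictor->GetOutputNames();
  auto out = predictor->GetOutputHandle(names[0]);

  auto shape = out->shape();
  int numel = std::accumulate(shape.begin(), shape.end(), 1,
                              std::multiplies<int>());
  // Element size in bytes, e.g. 4 for DataType::FLOAT32.
  int elem_bytes = paddle_infer::GetNumBytesOfDataType(out->type());
  (void)elem_bytes;  // could be used to allocate a raw byte buffer instead

  std::vector<float> host(numel);  // assumes the output is FLOAT32
  out->CopyToCpu(host.data());
}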
paddle/fluid/inference/tests/api/CMakeLists.txt

@@ -515,3 +515,9 @@ if(WITH_MKLDNN)
 inference_analysis_test(test_analyzer_capi_ner SRCS analyzer_capi_ner_tester.cc
         EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
         ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model)
+
+if(WITH_GPU)
+  inference_analysis_test(paddle_infer_api_test SRCS paddle_infer_api_test.cc
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+          ARGS --infer_model=${RESNET50_MODEL_DIR})
+endif()
paddle/fluid/inference/tests/api/lite_resnet50_test.cc

@@ -72,3 +72,59 @@ TEST(AnalysisPredictor, use_gpu) {
 }  // namespace inference
 }  // namespace paddle
+
+namespace paddle_infer {
+
+TEST(Predictor, use_gpu) {
+  std::string model_dir = FLAGS_infer_model + "/" + "model";
+  Config config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir + "/model", model_dir + "/params");
+  config.EnableLiteEngine(PrecisionType::kFloat32);
+
+  auto predictor = CreatePredictor(config);
+  const int batch = 1;
+  const int channel = 3;
+  const int height = 318;
+  const int width = 318;
+  const int input_num = batch * channel * height * width;
+  std::vector<float> input(input_num, 1);
+
+  auto input_names = predictor->GetInputNames();
+  auto input_t = predictor->GetInputHandle(input_names[0]);
+
+  input_t->Reshape({1, 3, 318, 318});
+  input_t->CopyFromCpu(input.data());
+  predictor->Run();
+
+  auto output_names = predictor->GetOutputNames();
+  auto output_t = predictor->GetOutputHandle(output_names[0]);
+  std::vector<int> output_shape = output_t->shape();
+  size_t out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
+                                   std::multiplies<int>());
+
+  std::vector<float> out_data;
+  out_data.resize(out_num);
+  output_t->CopyToCpu(out_data.data());
+
+  const std::vector<float> truth_values = {
+      127.780396f, 738.16656f,  1013.2264f,  -438.17206f, 366.4022f,
+      927.66187f,  736.2241f,   -633.68567f, -329.92737f, -430.15637f,
+      -633.0639f,  -146.54858f, -1324.2804f, -1349.3661f, -242.67671f,
+      117.44864f,  -801.7251f,  -391.51495f, -404.8202f,  454.16132f,
+      515.48206f,  -133.03114f, 69.293076f,  590.09753f,  -1434.6917f,
+      -1070.8903f, 307.0744f,   400.52573f,  -316.12177f, -587.1265f,
+      -161.05742f, 800.3663f,   -96.47157f,  748.708f,    868.17645f,
+      -447.9403f,  112.73656f,  1127.1992f,  47.43518f,   677.7219f,
+      593.1881f,   -336.4011f,  551.3634f,   397.82474f,  78.39835f,
+      -715.4006f,  405.96988f,  404.25684f,  246.01978f,  -8.430191f,
+      131.36617f,  -648.0528f};
+
+  float* data_o = out_data.data();
+  for (size_t j = 0; j < out_num; j += 10) {
+    EXPECT_NEAR((data_o[j] - truth_values[j / 10]) / truth_values[j / 10], 0.,
+                10e-5);
+  }
+}
+
+}  // namespace paddle_infer
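The new test checks every tenth output element against golden values using a relative-error tolerance. A small hedged sketch of that check factored into a reusable helper (the helper name and signature are illustrative, not part of the commit):

// Hedged sketch of the relative-error check used in the test above.
#include <gtest/gtest.h>
#include <cstddef>
#include <vector>

// Compares out[0], out[10], out[20], ... against truth[0], truth[1], ...
inline void CheckEveryTenth(const std::vector<float>& out,
                            const std::vector<float>& truth,
                            float rel_tol = 10e-5f) {
  for (size_t j = 0; j < out.size(); j += 10) {
    EXPECT_NEAR((out[j] - truth[j / 10]) / truth[j / 10], 0.f, rel_tol);
  }
}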
paddle/fluid/inference/tests/api/paddle_infer_api_test.cc (new file, mode 100644)

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <cuda_runtime.h>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <cstring>
#include <numeric>
#include "paddle/fluid/inference/tests/api/trt_test_helper.h"

namespace paddle_infer {

TEST(Predictor, use_gpu) {
  LOG(INFO) << GetVersion();
  UpdateDllFlag("conv_workspace_size_limit", "4000");
  std::string model_dir = FLAGS_infer_model + "/model";
  Config config;
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableUseGpu(100, 0);

  auto predictor = CreatePredictor(config);
  auto pred_clone = predictor->Clone();

  std::vector<int> in_shape = {1, 3, 318, 318};
  int in_num = std::accumulate(in_shape.begin(), in_shape.end(), 1,
                               [](int &a, int &b) { return a * b; });

  std::vector<float> input(in_num, 0);

  auto input_names = predictor->GetInputNames();
  auto input_t = predictor->GetInputHandle(input_names[0]);

  input_t->Reshape(in_shape);
  input_t->CopyFromCpu(input.data());
  predictor->Run();

  auto output_names = predictor->GetOutputNames();
  auto output_t = predictor->GetOutputHandle(output_names[0]);
  std::vector<int> output_shape = output_t->shape();
  int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                std::multiplies<int>());

  std::vector<float> out_data;
  out_data.resize(out_num);
  output_t->CopyToCpu(out_data.data());
  predictor->ClearIntermediateTensor();
}

TEST(PredictorPool, basic) {
  LOG(INFO) << GetVersion();
  UpdateDllFlag("conv_workspace_size_limit", "4000");
  std::string model_dir = FLAGS_infer_model + "/model";
  Config config;
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableUseGpu(100, 0);

  services::PredictorPool pred_pool(config, 4);
  auto pred = pred_pool.Retrive(2);

  std::vector<int> in_shape = {1, 3, 318, 318};
  int in_num = std::accumulate(in_shape.begin(), in_shape.end(), 1,
                               [](int &a, int &b) { return a * b; });
  std::vector<float> input(in_num, 0);

  auto in_names = pred->GetInputNames();
  auto input_t = pred->GetInputHandle(in_names[0]);

  input_t->name();
  input_t->Reshape(in_shape);
  input_t->CopyFromCpu(input.data());
  pred->Run();
  auto out_names = pred->GetOutputNames();
  auto output_t = pred->GetOutputHandle(out_names[0]);
  auto out_type = output_t->type();
  LOG(INFO) << GetNumBytesOfDataType(out_type);
  if (out_type == DataType::FLOAT32) {
    PlaceType place;
    int size;
    output_t->data<float>(&place, &size);
  }
}

}  // namespace paddle_infer
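The PredictorPool test above drives a pool of four predictors from a single thread. The pool's natural use is one predictor per worker; below is a minimal hedged sketch of that pattern, assuming each pooled Predictor is used by exactly one thread and with a placeholder model path:

// Hedged sketch: one pooled predictor per worker thread.
#include <thread>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void RunPoolWithThreads() {
  paddle_infer::Config config;
  config.SetModel("./model/model", "./model/params");  // hypothetical paths
  config.EnableUseGpu(100, 0);

  const size_t num_workers = 4;
  paddle_infer::services::PredictorPool pool(config, num_workers);

  std::vector<std::thread> workers;
  for (size_t i = 0; i < num_workers; ++i) {
    workers.emplace_back([&pool, i] {
      // Each thread retrieves and keeps its own predictor index.
      paddle_infer::Predictor* pred = pool.Retrive(i);
      // ... feed inputs via pred->GetInputHandle(...), then pred->Run() ...
      (void)pred;
    });
  }
  for (auto& t : workers) t.join();
}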
paddle/fluid/inference/tests/api/trt_mobilenet_test.cc

@@ -41,7 +41,7 @@ TEST(AnalysisPredictor, use_gpu) {
   SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "");

   std::vector<PaddleTensor> outputs;
-  for (auto &input : inputs_all) {
+  for (auto &input : inputs_all) {
     ASSERT_TRUE(predictor->Run(input, &outputs));
     predictor->ClearIntermediateTensor();
   }

@@ -49,3 +49,27 @@ TEST(AnalysisPredictor, use_gpu) {
 }  // namespace inference
 }  // namespace paddle
+
+namespace paddle_infer {
+TEST(PredictorPool, use_gpu) {
+  std::string model_dir = FLAGS_infer_model + "/" + "mobilenet";
+  Config config;
+  config.EnableUseGpu(100, 0);
+  config.SetModel(model_dir);
+  config.EnableTensorRtEngine();
+  services::PredictorPool pred_pool(config, 1);
+
+  auto predictor = pred_pool.Retrive(0);
+  auto input_names = predictor->GetInputNames();
+  auto input_t = predictor->GetInputHandle(input_names[0]);
+
+  std::vector<int> in_shape = {1, 3, 224, 224};
+  int in_num = std::accumulate(in_shape.begin(), in_shape.end(), 1,
+                               [](int &a, int &b) { return a * b; });
+
+  std::vector<float> input(in_num, 0);
+  input_t->Reshape(in_shape);
+  input_t->CopyFromCpu(input.data());
+  predictor->Run();
+}
+}  // namespace paddle_infer
paddle/fluid/pybind/inference_api.cc

@@ -206,9 +206,9 @@ void BindInferenceApi(py::module *m) {
   BindMkldnnQuantizerConfig(m);
 #endif
   m->def("create_paddle_predictor",
-         &paddle::CreatePaddlePredictor<AnalysisConfig>);
+         &paddle::CreatePaddlePredictor<AnalysisConfig>, py::arg("config"));
   m->def("create_paddle_predictor",
-         &paddle::CreatePaddlePredictor<NativeConfig>);
+         &paddle::CreatePaddlePredictor<NativeConfig>, py::arg("config"));
   m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
   m->def("paddle_tensor_to_bytes", &SerializePDTensorToBytes);
 }
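Adding py::arg("config") lets Python callers pass the argument by keyword, e.g. create_paddle_predictor(config=cfg), and gives the bound overloads a named signature in help(). A minimal self-contained pybind11 sketch of the same pattern (the module and function names below are illustrative, not Paddle's):

// Minimal pybind11 sketch of naming arguments with py::arg.
// "demo" and "scale" are illustrative names, not part of Paddle.
#include <pybind11/pybind11.h>
namespace py = pybind11;

int Scale(int value, int factor) { return value * factor; }

PYBIND11_MODULE(demo, m) {
  // Without py::arg the parameters are unnamed and positional-only from Python;
  // with py::arg the function can be called as demo.scale(value=3, factor=2).
  m.def("scale", &Scale, py::arg("value"), py::arg("factor"));
}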