Commit be6a8330 (unverified)
Authored by Wilber on Oct 19, 2021; committed via GitHub on Oct 19, 2021.
Inference add type check in copy_from_cpu (#36429)
* update
* fix ut error
* update ut
Parent: 6cdc5a4b
Showing 10 changed files with 144 additions and 5 deletions.
paddle/fluid/inference/api/analysis_predictor.cc            +18  -0
paddle/fluid/inference/api/analysis_predictor_tester.cc      +9  -0
paddle/fluid/inference/api/paddle_inference_api.h            +2  -0
paddle/fluid/inference/tensorrt/engine.cc                   +13  -0
paddle/fluid/inference/tensorrt/helper.h                    +16  -0
paddle/fluid/pybind/inference_api.cc                         +7  -4
python/paddle/fluid/inference/__init__.py                    +1  -1
python/paddle/fluid/inference/wrapper.py                    +15  -0
python/paddle/fluid/tests/unittests/test_inference_api.py   +59  -0
python/paddle/inference/__init__.py                          +4  -0
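For context, the user-visible effect of this change in the Python inference API is roughly the following sketch; the model directory is a placeholder (any valid inference Config behaves the same), and the error message is the one added in python/paddle/fluid/inference/wrapper.py:

import numpy as np
from paddle.inference import Config, create_predictor

config = Config("./sample_model_dir")  # placeholder model location
predictor = create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])

# Unchanged: numpy ndarrays are accepted as before.
input_handle.copy_from_cpu(np.ones((1, 6, 64, 64), dtype=np.float32))
predictor.run()

# New: any non-ndarray input is rejected up front instead of failing later.
try:
    input_handle.copy_from_cpu([[1.0, 2.0], [3.0, 4.0]])
except TypeError as err:
    print(err)  # In copy_from_cpu, we only support numpy ndarray data type.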
paddle/fluid/inference/api/analysis_predictor.cc
@@ -36,6 +36,7 @@
#include "paddle/fluid/inference/analysis/helper.h"
#include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/utils/io_utils.h"
#include "paddle/fluid/inference/utils/singleton.h"

@@ -56,6 +57,7 @@
#if PADDLE_WITH_TENSORRT
#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#endif

@@ -1471,6 +1473,22 @@ int GetNumBytesOfDataType(DataType dtype) {
std::string GetVersion() { return paddle::get_version(); }

std::tuple<int, int, int> GetTrtCompileVersion() {
#ifdef PADDLE_WITH_TENSORRT
  return paddle::inference::tensorrt::GetTrtCompileVersion();
#else
  return std::tuple<int, int, int>{0, 0, 0};
#endif
}

std::tuple<int, int, int> GetTrtRuntimeVersion() {
#ifdef PADDLE_WITH_TENSORRT
  return paddle::inference::tensorrt::GetTrtRuntimeVersion();
#else
  return std::tuple<int, int, int>{0, 0, 0};
#endif
}

std::string UpdateDllFlag(const char *name, const char *value) {
  return paddle::UpdateDllFlag(name, value);
}
paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -359,6 +359,15 @@ TEST(AnalysisPredictor, set_xpu_device_id) {
namespace paddle_infer {

TEST(Predictor, Run) {
  auto trt_compile_ver = GetTrtCompileVersion();
  auto trt_runtime_ver = GetTrtRuntimeVersion();
  LOG(INFO) << "trt compile version: " << std::get<0>(trt_compile_ver) << "."
            << std::get<1>(trt_compile_ver) << "."
            << std::get<2>(trt_compile_ver);
  LOG(INFO) << "trt runtime version: " << std::get<0>(trt_runtime_ver) << "."
            << std::get<1>(trt_runtime_ver) << "."
            << std::get<2>(trt_runtime_ver);
  Config config;
  config.SetModel(FLAGS_dirname);
paddle/fluid/inference/api/paddle_inference_api.h
@@ -169,6 +169,8 @@ PD_INFER_DECL std::shared_ptr<Predictor> CreatePredictor(
PD_INFER_DECL int GetNumBytesOfDataType(DataType dtype);
PD_INFER_DECL std::string GetVersion();
PD_INFER_DECL std::tuple<int, int, int> GetTrtCompileVersion();
PD_INFER_DECL std::tuple<int, int, int> GetTrtRuntimeVersion();
PD_INFER_DECL std::string UpdateDllFlag(const char *name, const char *value);

namespace services {
paddle/fluid/inference/tensorrt/engine.cc
@@ -190,6 +190,19 @@ void TensorRTEngine::FreezeNetwork() {
#if IS_TRT_VERSION_GE(6000)
    LOG(INFO) << "Run Paddle-TRT Dynamic Shape mode.";
    for (auto &input : min_input_shape_) {
#if IS_TRT_VERSION_LT(7000)
      // trt6 will check all_of input > 0
      if (!(std::all_of(input.second.begin(), input.second.end(),
                        [](int x) { return x > 0; }) &&
            std::all_of(max_input_shape_[input.first].begin(),
                        max_input_shape_[input.first].end(),
                        [](int x) { return x > 0; }) &&
            std::all_of(optim_input_shape_[input.first].begin(),
                        optim_input_shape_[input.first].end(),
                        [](int x) { return x > 0; }))) {
        continue;
      }
#endif
      VLOG(4) << "TRT dynamic_shape set " << input.first
              << " min: " << Vec2Str(input.second)
              << ", max: " << Vec2Str(max_input_shape_[input.first])
paddle/fluid/inference/tensorrt/helper.h
@@ -73,8 +73,24 @@ static nvinfer1::IPluginRegistry* GetPluginRegistry() {
static int GetInferLibVersion() {
  return static_cast<int>(dy::getInferLibVersion());
}
#else
static int GetInferLibVersion() { return 0; }
#endif

static std::tuple<int, int, int> GetTrtRuntimeVersion() {
  int ver = GetInferLibVersion();
  int major = ver / 1000;
  ver -= major * 1000;
  int minor = ver / 100;
  int patch = ver - minor * 100;
  return std::tuple<int, int, int>{major, minor, patch};
}

static std::tuple<int, int, int> GetTrtCompileVersion() {
  return std::tuple<int, int, int>{NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR,
                                   NV_TENSORRT_PATCH};
}

// A logger for create TensorRT infer builder.
class NaiveLogger : public nvinfer1::ILogger {
 public:
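GetTrtRuntimeVersion() above assumes the packed integer returned by getInferLibVersion() encodes the version as major*1000 + minor*100 + patch. A small illustrative decode of that arithmetic (the value 7103 is made up for the example):

def decode_trt_version(ver):
    # Mirrors the arithmetic in GetTrtRuntimeVersion().
    major, rest = divmod(ver, 1000)
    minor, patch = divmod(rest, 100)
    return major, minor, patch

print(decode_trt_version(7103))  # (7, 1, 3), i.e. TensorRT 7.1.3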
paddle/fluid/pybind/inference_api.cc
@@ -330,6 +330,8 @@ void BindInferenceApi(py::module *m) {
  m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
  m->def("paddle_tensor_to_bytes", &SerializePDTensorToBytes);
  m->def("get_version", &paddle_infer::GetVersion);
  m->def("get_trt_compile_version", &paddle_infer::GetTrtCompileVersion);
  m->def("get_trt_runtime_version", &paddle_infer::GetTrtRuntimeVersion);
  m->def("get_num_bytes_of_data_type", &paddle_infer::GetNumBytesOfDataType);
}

@@ -739,10 +741,11 @@ void BindZeroCopyTensor(py::module *m) {
void BindPaddleInferTensor(py::module *m) {
  py::class_<paddle_infer::Tensor>(*m, "PaddleInferTensor")
      .def("reshape", &paddle_infer::Tensor::Reshape)
      .def("copy_from_cpu", &PaddleInferTensorCreate<int32_t>)
      .def("copy_from_cpu", &PaddleInferTensorCreate<int64_t>)
      .def("copy_from_cpu", &PaddleInferTensorCreate<float>)
      .def("copy_from_cpu", &PaddleInferTensorCreate<paddle_infer::float16>)
      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int32_t>)
      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<int64_t>)
      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<float>)
      .def("copy_from_cpu_bind", &PaddleInferTensorCreate<paddle_infer::float16>)
      .def("copy_to_cpu", &PaddleInferTensorToNumpy)
      .def("shape", &paddle_infer::Tensor::shape)
      .def("set_lod", &paddle_infer::Tensor::SetLoD)
python/paddle/fluid/inference/__init__.py
@@ -14,4 +14,4 @@
from .wrapper import Config, DataType, PlaceType, PrecisionType, Tensor, Predictor
from ..core import create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool
from ..core import create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version
python/paddle/fluid/inference/wrapper.py
@@ -15,9 +15,24 @@
from ..core import AnalysisConfig, PaddleDType, PaddlePlace
from ..core import PaddleInferPredictor, PaddleInferTensor

import numpy as np

DataType = PaddleDType
PlaceType = PaddlePlace
PrecisionType = AnalysisConfig.Precision

Config = AnalysisConfig
Tensor = PaddleInferTensor
Predictor = PaddleInferPredictor


def tensor_copy_from_cpu(self, data):
    '''
    Support input type check based on tensor.copy_from_cpu.
    '''
    if not isinstance(data, np.ndarray):
        raise TypeError(
            "In copy_from_cpu, we only support numpy ndarray data type.")
    self.copy_from_cpu_bind(data)


Tensor.copy_from_cpu = tensor_copy_from_cpu
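The pattern above keeps the raw pybind method reachable as copy_from_cpu_bind and installs a checked Python wrapper as copy_from_cpu on the Tensor class. A stand-alone sketch of the same pattern, with made-up class and variable names:

import numpy as np

class DemoTensor:
    def copy_from_cpu_bind(self, data):
        # Stands in for the pybind-exported copy_from_cpu_bind overloads.
        print("copying array of shape", data.shape)

def checked_copy_from_cpu(self, data):
    # Same check as in wrapper.py: only numpy ndarrays are accepted.
    if not isinstance(data, np.ndarray):
        raise TypeError(
            "In copy_from_cpu, we only support numpy ndarray data type.")
    self.copy_from_cpu_bind(data)

# Installing the wrapper on the class makes every instance type-checked.
DemoTensor.copy_from_cpu = checked_copy_from_cpu

DemoTensor().copy_from_cpu(np.zeros((2, 3), dtype=np.float32))  # ok
# DemoTensor().copy_from_cpu([1, 2, 3])  # would raise TypeError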
python/paddle/fluid/tests/unittests/test_inference_api.py
@@ -14,10 +14,14 @@
import os, shutil
import unittest
import paddle
paddle.enable_static()
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.core import PaddleTensor
from paddle.fluid.core import PaddleDType
from paddle.inference import Config, Predictor, create_predictor
from paddle.inference import get_trt_compile_version, get_trt_runtime_version


class TestInferenceApi(unittest.TestCase):

@@ -54,5 +58,60 @@ class TestInferenceApi(unittest.TestCase):
                         tensor_float.ravel().tolist())


def get_sample_model():
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program, startup_program):
        data = fluid.data(name="data", shape=[-1, 6, 64, 64], dtype="float32")
        conv_out = fluid.layers.conv2d(
            input=data,
            num_filters=3,
            filter_size=3,
            groups=1,
            padding=0,
            bias_attr=False,
            act=None)
    exe.run(startup_program)
    serialized_program = paddle.static.serialize_program(
        data, conv_out, program=main_program)
    serialized_params = paddle.static.serialize_persistables(
        data, conv_out, executor=exe, program=main_program)
    return serialized_program, serialized_params


class TestInferenceBaseAPI(unittest.TestCase):
    def get_config(self, model, params):
        config = Config()
        config.set_model_buffer(model, len(model), params, len(params))
        config.enable_use_gpu(100, 0)
        return config

    def test_apis(self):
        print('trt compile version:', get_trt_compile_version())
        print('trt runtime version:', get_trt_runtime_version())
        program, params = get_sample_model()
        config = self.get_config(program, params)
        predictor = create_predictor(config)
        in_names = predictor.get_input_names()
        in_handle = predictor.get_input_handle(in_names[0])
        in_data = np.ones((1, 6, 32, 32)).astype(np.float32)
        in_handle.copy_from_cpu(in_data)
        predictor.run()

    def test_wrong_input(self):
        with self.assertRaises(TypeError):
            program, params = get_sample_model()
            config = self.get_config(program, params)
            predictor = create_predictor(config)
            in_names = predictor.get_input_names()
            in_handle = predictor.get_input_handle(in_names[0])
            in_data = np.ones((1, 6, 64, 64)).astype(np.float32)
            in_handle.copy_from_cpu(list(in_data))
            predictor.run()


if __name__ == '__main__':
    unittest.main()
python/paddle/inference/__init__.py
@@ -20,6 +20,8 @@ from ..fluid.inference import Tensor  # noqa: F401
from ..fluid.inference import Predictor  # noqa: F401
from ..fluid.inference import create_predictor  # noqa: F401
from ..fluid.inference import get_version  # noqa: F401
from ..fluid.inference import get_trt_compile_version  # noqa: F401
from ..fluid.inference import get_trt_runtime_version  # noqa: F401
from ..fluid.inference import get_num_bytes_of_data_type  # noqa: F401
from ..fluid.inference import PredictorPool  # noqa: F401

@@ -32,6 +34,8 @@ __all__ = [  # noqa
    'Predictor',
    'create_predictor',
    'get_version',
    'get_trt_compile_version',
    'get_trt_runtime_version',
    'get_num_bytes_of_data_type',
    'PredictorPool'
]
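With these re-exports, the new version helpers are importable directly from paddle.inference. A minimal usage sketch; the printed values depend on the local build, and a Paddle built without TensorRT reports 0.0.0 via the C++ fallback shown earlier:

from paddle.inference import get_trt_compile_version, get_trt_runtime_version

print("trt compile version: %d.%d.%d" % get_trt_compile_version())
print("trt runtime version: %d.%d.%d" % get_trt_runtime_version())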