Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
SummerGao.
Paddle
提交
8ad635d5
P
Paddle
项目概览
SummerGao.
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
未验证
提交
8ad635d5
编写于
2月 20, 2023
作者:
J
JingZhuangzhuang
提交者:
GitHub
2月 20, 2023
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
share_data interface support paddle.Tensor type (#50240)
上级
c36c7199
变更
3
隐藏空白更改
内联
并排
Showing 3 changed files with 121 additions and 15 deletions
+121
-15
paddle/fluid/pybind/inference_api.cc
paddle/fluid/pybind/inference_api.cc
+67
-9
python/paddle/fluid/tests/unittests/test_inference_api.py
python/paddle/fluid/tests/unittests/test_inference_api.py
+42
-3
python/paddle/inference/wrapper.py
python/paddle/inference/wrapper.py
+12
-3
未找到文件。
paddle/fluid/pybind/inference_api.cc
浏览文件 @
8ad635d5
...
...
@@ -37,6 +37,9 @@
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_pass_builder.h"
#include "paddle/fluid/inference/utils/io_utils.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/core/compat/convert_utils.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
...
...
@@ -259,6 +262,55 @@ void PaddleInferShareExternalData(paddle_infer::Tensor &tensor, // NOLINT
static_cast
<
paddle
::
platform
::
float16
*>
(
input_tensor
.
data
()),
shape
,
ToPaddleInferPlace
(
input_tensor
.
place
().
GetType
()));
}
else
if
(
input_tensor
.
dtype
()
==
phi
::
DataType
::
INT32
)
{
tensor
.
ShareExternalData
(
static_cast
<
int32_t
*>
(
input_tensor
.
data
()),
shape
,
ToPaddleInferPlace
(
input_tensor
.
place
().
GetType
()));
}
else
if
(
input_tensor
.
dtype
()
==
phi
::
DataType
::
INT64
)
{
tensor
.
ShareExternalData
(
static_cast
<
int64_t
*>
(
input_tensor
.
data
()),
shape
,
ToPaddleInferPlace
(
input_tensor
.
place
().
GetType
()));
}
else
{
PADDLE_THROW
(
platform
::
errors
::
Unimplemented
(
"Unsupported data type. Now share_external_data only supports INT32, "
"INT64, FLOAT32 and FLOAT16."
));
}
}
// Share the buffer of a dygraph paddle::experimental::Tensor with an
// inference paddle_infer::Tensor (zero-copy): the inference tensor aliases
// the source tensor's memory via ShareExternalData.
//
// Supported dtypes: FLOAT32, FLOAT16, INT32, INT64; any other dtype raises
// an Unimplemented error. The shape and placement (CPU/GPU) are taken from
// the source tensor.
void PaddleTensorShareExternalData(
    paddle_infer::Tensor &tensor,  // NOLINT
    paddle::experimental::Tensor &&paddle_tensor) {
  // Flatten the source dims into the vector<int> form ShareExternalData
  // expects.
  std::vector<int> shape;
  for (int i = 0; i < paddle_tensor.dims().size(); ++i) {
    shape.push_back(paddle_tensor.dims()[i]);
  }
  if (paddle_tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
    // NOTE: data<T>() already returns a typed T*, so the static_cast that
    // used to wrap each call was redundant and has been dropped.
    tensor.ShareExternalData(
        paddle_tensor.data<float>(),
        shape,
        ToPaddleInferPlace(paddle_tensor.place().GetType()));
  } else if (paddle_tensor.dtype() ==
             paddle::experimental::DataType::FLOAT16) {
    tensor.ShareExternalData(
        paddle_tensor.data<paddle::platform::float16>(),
        shape,
        ToPaddleInferPlace(paddle_tensor.place().GetType()));
  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT32) {
    tensor.ShareExternalData(
        paddle_tensor.data<int32_t>(),
        shape,
        ToPaddleInferPlace(paddle_tensor.place().GetType()));
  } else if (paddle_tensor.dtype() == paddle::experimental::DataType::INT64) {
    tensor.ShareExternalData(
        paddle_tensor.data<int64_t>(),
        shape,
        ToPaddleInferPlace(paddle_tensor.place().GetType()));
  } else {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Unsupported data type. Now share_external_data only supports INT32, "
        "INT64, FLOAT32 and FLOAT16."));
  }
}
...
...
@@ -1043,16 +1095,22 @@ void BindPaddleInferTensor(py::module *m) {
.
def
(
"reshape"
,
py
::
overload_cast
<
const
std
::
size_t
&>
(
&
paddle_infer
::
Tensor
::
ReshapeStrings
))
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int8_t
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
uint8_t
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int32_t
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int64_t
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
float
>
)
.
def
(
"copy_from_cpu_bind"
,
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int8_t
>
)
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
uint8_t
>
)
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int32_t
>
)
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
int64_t
>
)
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
float
>
)
.
def
(
"
_
copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
paddle_infer
::
float16
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
bool
>
)
.
def
(
"copy_from_cpu_bind"
,
&
PaddleInferStringTensorCreate
)
.
def
(
"share_external_data_bind"
,
&
PaddleInferShareExternalData
)
.
def
(
"_copy_from_cpu_bind"
,
&
PaddleInferTensorCreate
<
bool
>
)
.
def
(
"_copy_from_cpu_bind"
,
&
PaddleInferStringTensorCreate
)
.
def
(
"_share_external_data_bind"
,
&
PaddleInferShareExternalData
)
.
def
(
"_share_external_data_paddle_tensor_bind"
,
[](
paddle_infer
::
Tensor
&
self
,
const
py
::
handle
&
input
)
{
PyObject
*
obj
=
input
.
ptr
();
PaddleTensorShareExternalData
(
self
,
std
::
move
(
CastPyArg2Tensor
(
obj
,
0
)));
})
.
def
(
"copy_to_cpu"
,
&
PaddleInferTensorToNumpy
)
.
def
(
"shape"
,
&
paddle_infer
::
Tensor
::
shape
)
.
def
(
"set_lod"
,
&
paddle_infer
::
Tensor
::
SetLoD
)
...
...
python/paddle/fluid/tests/unittests/test_inference_api.py
浏览文件 @
8ad635d5
...
...
@@ -119,16 +119,55 @@ class TestInferenceBaseAPI(unittest.TestCase):
predictor
.
run
()
def
test_wrong_input
(
self
):
program
,
params
=
get_sample_model
()
config
=
self
.
get_config
(
program
,
params
)
predictor
=
create_predictor
(
config
)
in_names
=
predictor
.
get_input_names
()
in_handle
=
predictor
.
get_input_handle
(
in_names
[
0
])
with
self
.
assertRaises
(
TypeError
):
program
,
params
=
get_sample_model
()
in_data
=
np
.
ones
((
1
,
6
,
64
,
64
)).
astype
(
np
.
float32
)
in_handle
.
copy_from_cpu
(
list
(
in_data
))
predictor
.
run
()
with
self
.
assertRaises
(
TypeError
):
in_handle
.
share_external_data
(
paddle
.
to_tensor
(
np
.
full
((
1
,
6
,
32
,
32
),
1.0
,
"float32"
),
place
=
paddle
.
CPUPlace
(),
)
)
predictor
.
run
()
def
test_share_external_data
(
self
):
program
,
params
=
get_sample_model
()
def
test_lod_tensor
():
config
=
Config
()
config
.
set_model_buffer
(
program
,
len
(
program
),
params
,
len
(
params
))
predictor
=
create_predictor
(
config
)
in_names
=
predictor
.
get_input_names
()
in_handle
=
predictor
.
get_input_handle
(
in_names
[
0
])
in_data
=
paddle
.
fluid
.
create_lod_tensor
(
np
.
full
((
1
,
6
,
32
,
32
),
1.0
,
"float32"
),
[[
1
]],
paddle
.
fluid
.
CPUPlace
(),
)
in_handle
.
share_external_data
(
in_data
)
predictor
.
run
()
def
test_paddle_tensor
():
config
=
self
.
get_config
(
program
,
params
)
predictor
=
create_predictor
(
config
)
in_names
=
predictor
.
get_input_names
()
in_handle
=
predictor
.
get_input_handle
(
in_names
[
0
])
in_data
=
np
.
ones
((
1
,
6
,
64
,
64
)).
astype
(
np
.
float32
)
in_handle
.
copy_from_cpu
(
list
(
in_data
)
)
in_data
=
paddle
.
Tensor
(
np
.
ones
((
1
,
6
,
32
,
32
)).
astype
(
np
.
float32
)
)
in_handle
.
share_external_data
(
in_data
)
predictor
.
run
()
test_lod_tensor
()
test_paddle_tensor
()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
python/paddle/inference/wrapper.py
浏览文件 @
8ad635d5
...
...
@@ -17,6 +17,7 @@ from typing import Set
import
numpy
as
np
import
paddle
import
paddle.fluid.core
as
core
from
paddle.fluid.core
import
(
AnalysisConfig
,
...
...
@@ -42,7 +43,7 @@ def tensor_copy_from_cpu(self, data):
if
isinstance
(
data
,
np
.
ndarray
)
or
(
isinstance
(
data
,
list
)
and
len
(
data
)
>
0
and
isinstance
(
data
[
0
],
str
)
):
self
.
copy_from_cpu_bind
(
data
)
self
.
_
copy_from_cpu_bind
(
data
)
else
:
raise
TypeError
(
"In copy_from_cpu, we only support numpy ndarray and list[str] data type."
...
...
def tensor_share_external_data(self, data):
    '''
    Support input type check based on tensor.share_external_data.
    '''
    # Dispatch on the concrete input type; each path forwards to the
    # matching pybind-exposed binding on the inference tensor.
    if isinstance(data, core.LoDTensor):
        self._share_external_data_bind(data)
    elif isinstance(data, paddle.Tensor):
        # Dygraph paddle.Tensor path, added by this change.
        self._share_external_data_paddle_tensor_bind(data)
    elif isinstance(data, paddle.fluid.framework.Variable):
        # A static-graph Variable has no materialized buffer to share.
        raise TypeError(
            "The interface 'share_external_data' can only be used in dynamic graph mode. "
            "Maybe you called 'paddle.enable_static()' and you are in static graph mode now. "
            "Please use 'copy_from_cpu' instead."
        )
    else:
        raise TypeError(
            "In share_external_data, we only support Tensor and LoDTensor."
        )
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录