Commit 9f9e591d (unverified)

remove fluid memory pool (#41862)

Authored on Apr 18, 2022 by Wilber; committed via GitHub on Apr 18, 2022.
Parent: 5a103150

Showing 3 changed files, with 123 additions and 17 deletions:

  paddle/infrt/kernel/phi/dense_tensor_kernels.cc   +17  -17
  paddle/infrt/paddle/model_parser.cc               +95   -0
  paddle/infrt/paddle/model_parser.h                +11   -0
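In effect, the commit removes infrt's dependency on the fluid memory pool: the forward-declared ::paddle::framework::DeserializeFromStream, whose implementation allocates through fluid, is replaced by a new infrt::paddle::DeserializeFromStream in model_parser.{h,cc} that reads a serialized LoDTensor directly into a ::phi::DenseTensor, allocating through a ::phi::CPUContext. The two parameter-loading loops in dense_tensor_kernels.cc switch to the new reader and now share a single CPU context backed by a backends::CpuPhiAllocator instead of constructing a bare context per variable.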
paddle/infrt/kernel/phi/dense_tensor_kernels.cc  (+17, -17)

```diff
@@ -13,7 +13,9 @@
 // limitations under the License.
 
 #include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
+
+#include <memory>
 
 #include "llvm/Support/ErrorHandling.h"
 #include "paddle/infrt/backends/host/phi_allocator.h"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/phi/data_type.h"
 #include "paddle/infrt/kernel/phi/context_kernels.h"
@@ -22,24 +24,13 @@
 #include "paddle/infrt/tensor/tensor_map.h"
 #include "paddle/phi/backends/all_context.h"
 #include "paddle/phi/common/place.h"
+#include "paddle/phi/core/allocator.h"
 #include "paddle/phi/core/dense_tensor.h"
 
 #ifdef INFRT_WITH_GPU
 #include <cuda_runtime.h>
 #endif
 
-namespace paddle {
-namespace platform {
-using DeviceContext = ::phi::DeviceContext;
-}  // namespace platform
-namespace framework {
-using LoDTensor = ::phi::DenseTensor;
-void DeserializeFromStream(std::istream& is,
-                           LoDTensor* tensor,
-                           const platform::DeviceContext& dev_ctx);
-}
-}  // namespace paddle
-
 namespace infrt {
 namespace kernel {
 namespace phi {
@@ -198,6 +189,12 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
   auto pb_proto_prog = paddle::LoadProgram(model_path);
   auto main_block = pb_proto_prog->blocks(0);
 
+  ::phi::CPUContext ctx;
+  auto allocator = std::make_unique<backends::CpuPhiAllocator>();
+  const auto* allocator_ptr = allocator.get();
+  ctx.SetAllocator(allocator_ptr);
+  ctx.SetHostAllocator(allocator_ptr);
+  ctx.SetZeroAllocator(allocator_ptr);
   for (auto& var : main_block.vars()) {
     if (var.name() == "feed" || var.name() == "fetch" || !var.persistable())
       continue;
@@ -207,9 +204,7 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
       case ::paddle::framework::proto::VarType_Type_LOD_TENSOR: {
         std::unique_ptr<::phi::DenseTensor> tensor{
             std::make_unique<::phi::DenseTensor>()};
-        ::phi::CPUContext ctx;
-        ::paddle::framework::DeserializeFromStream(
-            param_file, tensor.get(), ctx);
+        ::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
         map.SetDenseTensor(var.name(), std::move(tensor));
       } break;
       default: {
@@ -249,11 +244,16 @@ void PrintDenseTensor(::phi::DenseTensor* dense_tensor) {
     }
   }
 
+  ::phi::CPUContext ctx;
+  auto allocator = std::make_unique<backends::CpuPhiAllocator>();
+  const auto* allocator_ptr = allocator.get();
+  ctx.SetAllocator(allocator_ptr);
+  ctx.SetHostAllocator(allocator_ptr);
+  ctx.SetZeroAllocator(allocator_ptr);
   for (auto& var : tmp) {
     std::unique_ptr<::phi::DenseTensor> tensor{
         std::make_unique<::phi::DenseTensor>()};
-    ::phi::CPUContext ctx;
-    ::paddle::framework::DeserializeFromStream(param_file, tensor.get(), ctx);
+    ::infrt::paddle::DeserializeFromStream(param_file, tensor.get(), ctx);
     map.SetDenseTensor(var, std::move(tensor));
   }
```
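Both call sites above now follow the same shape: build one ::phi::CPUContext, wire all three allocator slots to a single CpuPhiAllocator, then deserialize each variable through it. The sketch below restates that pattern as a standalone helper. It is illustrative only and not part of the commit: LoadOneParam is a hypothetical name, the glog include is an assumption, and it presumes an INFRT_WITH_PHI build in which infrt::backends::CpuPhiAllocator is available.

```cpp
// A minimal sketch of the loading pattern the commit introduces.
// Hypothetical helper; assumes an INFRT_WITH_PHI build.
#include <fstream>
#include <memory>
#include <string>

#include <glog/logging.h>  // for CHECK (assumed available, as in infrt)

#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/paddle/model_parser.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"

void LoadOneParam(const std::string &path, ::phi::DenseTensor *tensor) {
  // One context and one allocator, shared by every allocation below;
  // this mirrors the setup hoisted out of the loops in
  // dense_tensor_kernels.cc.
  ::phi::CPUContext ctx;
  auto allocator = std::make_unique<infrt::backends::CpuPhiAllocator>();
  const auto *allocator_ptr = allocator.get();
  ctx.SetAllocator(allocator_ptr);      // regular allocations
  ctx.SetHostAllocator(allocator_ptr);  // host allocations (same pool on CPU)
  ctx.SetZeroAllocator(allocator_ptr);  // zero-size allocations

  std::ifstream param_file(path, std::ios::binary);
  CHECK(param_file.is_open()) << "cannot open " << path;
  // The new fluid-free reader added in model_parser.cc.
  infrt::paddle::DeserializeFromStream(param_file, tensor, ctx);
}
```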
paddle/infrt/paddle/model_parser.cc  (+95, -0)

```diff
@@ -22,6 +22,10 @@
 #include "paddle/infrt/common/target.h"
 #include "paddle/infrt/common/type.h"
 
+#ifdef INFRT_WITH_PHI
+#include "paddle/phi/common/data_type.h"
+#endif
+
 namespace infrt {
 namespace paddle {
@@ -170,5 +174,96 @@ void LoadParam(const std::string &path, _Variable *out, const Target &target) {
   LoadLoDTensor(fin, out, target);
 }
 
+#ifdef INFRT_WITH_PHI
+namespace framework_proto = ::paddle::framework::proto;
+
+inline ::phi::DataType PhiDataType(framework_proto::VarType::Type type) {
+  using Type = framework_proto::VarType::Type;
+  switch (static_cast<int>(type)) {
+    case Type::VarType_Type_BOOL:
+      return ::phi::DataType::BOOL;
+    case Type::VarType_Type_INT8:
+      return ::phi::DataType::INT8;
+    case Type::VarType_Type_UINT8:
+      return ::phi::DataType::UINT8;
+    case Type::VarType_Type_INT16:
+      return ::phi::DataType::INT16;
+    case Type::VarType_Type_INT32:
+      return ::phi::DataType::INT32;
+    case Type::VarType_Type_INT64:
+      return ::phi::DataType::INT64;
+    case Type::VarType_Type_SIZE_T:
+      return ::phi::DataType::UINT64;
+    case Type::VarType_Type_FP16:
+      return ::phi::DataType::FLOAT16;
+    case Type::VarType_Type_FP32:
+      return ::phi::DataType::FLOAT32;
+    case Type::VarType_Type_FP64:
+      return ::phi::DataType::FLOAT64;
+    default:
+      LOG(FATAL) << "unknown data type " << type;
+  }
+  return ::phi::DataType::UNDEFINED;
+}
+
+inline void TensorFromStream(std::istream &is,
+                             ::phi::DenseTensor *tensor,
+                             const ::phi::CPUContext &ctx) {
+  uint32_t version;
+  is.read(reinterpret_cast<char *>(&version), sizeof(version));
+  CHECK_EQ(version, 0U);
+  framework_proto::VarType::TensorDesc desc;
+  {  // int32_t size
+    // proto buffer
+    int32_t size = -1;
+    is.read(reinterpret_cast<char *>(&size), sizeof(size));
+    CHECK_EQ(is.good(), true);
+    CHECK_GE(size, 0);
+    std::unique_ptr<char[]> buf(new char[size]);
+    is.read(reinterpret_cast<char *>(buf.get()), size);
+    CHECK_EQ(desc.ParseFromArray(buf.get(), size), true);
+  }
+  {  // read tensor
+    std::vector<int64_t> dims;
+    dims.reserve(static_cast<size_t>(desc.dims().size()));
+    std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
+    tensor->Resize(::phi::make_ddim(dims));
+    void *buf;
+    size_t size = tensor->numel() * SizeOfType(desc.data_type());
+    ctx.HostAlloc(tensor, PhiDataType(desc.data_type()), size);
+    buf = tensor->data();
+    is.read(static_cast<char *>(buf), size);
+  }
+}
+
+void DeserializeFromStream(std::istream &is,
+                           ::phi::DenseTensor *tensor,
+                           const ::phi::CPUContext &dev_ctx) {
+  {
+    // the 1st field, uint32_t version for LoDTensor
+    uint32_t version;
+    is.read(reinterpret_cast<char *>(&version), sizeof(version));
+    CHECK_EQ(version, 0U);
+  }
+  {
+    // the 2nd field, LoD information
+    uint64_t lod_level;
+    is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
+    auto &lod = *tensor->mutable_lod();
+    lod.resize(lod_level);
+    for (uint64_t i = 0; i < lod_level; ++i) {
+      uint64_t size;
+      is.read(reinterpret_cast<char *>(&size), sizeof(size));
+      std::vector<size_t> tmp(size / sizeof(size_t));
+      is.read(reinterpret_cast<char *>(tmp.data()),
+              static_cast<std::streamsize>(size));
+      lod[i] = tmp;
+    }
+  }
+  // the 3rd field, Tensor
+  TensorFromStream(is, tensor, dev_ctx);
+}
+#endif
+
 }  // namespace paddle
 }  // namespace infrt
```
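Taken together, TensorFromStream and DeserializeFromStream pin down the byte layout the new reader expects: a uint32 LoDTensor version (must be 0), a uint64 LoD level count followed by one length-prefixed offset array per level, then a uint32 tensor version, an int32-length-prefixed VarType::TensorDesc protobuf, and finally the raw element buffer. The standalone sketch below walks that header with no Paddle dependency. It is illustration only: "params.bin" is a hypothetical file in this format, and a little-endian host matching the writer is assumed.

```cpp
// Header walker for the stream format parsed above (sketch only;
// "params.bin" is a hypothetical file in the serialized LoDTensor format).
#include <cstdint>
#include <fstream>
#include <iostream>

int main() {
  std::ifstream is("params.bin", std::ios::binary);
  if (!is) return 1;

  uint32_t lod_tensor_version = 0;  // field 1: LoDTensor version, must be 0
  is.read(reinterpret_cast<char *>(&lod_tensor_version),
          sizeof(lod_tensor_version));

  uint64_t lod_level = 0;  // field 2: number of LoD levels
  is.read(reinterpret_cast<char *>(&lod_level), sizeof(lod_level));
  for (uint64_t i = 0; i < lod_level; ++i) {
    uint64_t bytes = 0;  // each level: byte length, then size_t offsets
    is.read(reinterpret_cast<char *>(&bytes), sizeof(bytes));
    is.seekg(static_cast<std::streamoff>(bytes), std::ios::cur);
  }

  uint32_t tensor_version = 0;  // field 3 opens with its own version
  is.read(reinterpret_cast<char *>(&tensor_version), sizeof(tensor_version));
  int32_t desc_size = 0;  // then a length-prefixed TensorDesc protobuf
  is.read(reinterpret_cast<char *>(&desc_size), sizeof(desc_size));

  std::cout << "versions: " << lod_tensor_version << "/" << tensor_version
            << ", lod levels: " << lod_level
            << ", TensorDesc bytes: " << desc_size << "\n";
  return 0;  // raw element data follows; decoding it needs the proto desc
}
```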
paddle/infrt/paddle/model_parser.h  (+11, -0)

```diff
@@ -25,6 +25,11 @@
 #include "paddle/infrt/paddle/scope.h"
 #include "paddle/infrt/paddle/tensor.h"
 
+#ifdef INFRT_WITH_PHI
+#include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/dense_tensor.h"
+#endif
+
 namespace infrt {
 namespace paddle {
 
 namespace framework_proto = ::paddle::framework::proto;
@@ -53,5 +58,11 @@ void TensorFromStream(
                    const common::Target &target = common::DefaultHostTarget());
 void ReadBinaryFile(const std::string &filename, std::string *contents);
 
+#ifdef INFRT_WITH_PHI
+void DeserializeFromStream(std::istream &is,
+                           ::phi::DenseTensor *tensor,
+                           const ::phi::CPUContext &dev_ctx);
+#endif
+
 }  // namespace paddle
 }  // namespace infrt
```
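One design point worth noting in the header change: the new declaration sits inside the same INFRT_WITH_PHI guard as the phi includes, so builds without phi see no phi types in model_parser.h at all, and a caller compiled without the flag fails at compile time rather than at link time.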