s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit cc75e84d (unverified)
Authored on May 10, 2018 by Kexin Zhao; committed by GitHub on May 10, 2018

Merge pull request #10541 from kexinzhao/load_fp16

Add float16 support to load op

Parents: 28de0ea4, aa2635fe
Showing 2 changed files with 79 additions and 1 deletion (+79 −1):

paddle/fluid/operators/load_op.cc (+29 −0)
paddle/fluid/operators/save_load_op_test.cc (+50 −1)
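In outline, the change has the load op deserialize the tensor in its stored data type and, when the new load_as_fp16 attribute is set and the stored type is not already FP16, transform it into a temporary float16 tensor (copying its LoD info) and swap that tensor into the output variable. Below is a minimal standalone sketch of that deserialize → transform → swap flow, not the op itself: the Tensor struct and the element-wise cast are stand-ins for framework::LoDTensor and framework::TransDataType, and double stands in for platform::float16 so the snippet stays dependency-free.

// Minimal sketch of the load-then-convert pattern (hypothetical stand-in types).
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for framework::LoDTensor: LoD metadata plus one payload per dtype.
struct Tensor {
  std::vector<std::vector<std::size_t>> lod;  // sequence (LoD) information
  std::vector<float> f32;    // payload when the stored dtype is FP32
  std::vector<double> wide;  // payload for the converted dtype (stand-in for FP16)
  bool converted = false;
};

// Stand-in for framework::TransDataType: element-wise cast into a new tensor.
Tensor TransDataType(const Tensor &in) {
  Tensor out;
  out.lod = in.lod;  // copy LoD info to the new tensor, as the op does
  out.converted = true;
  out.wide.reserve(in.f32.size());
  for (float v : in.f32) out.wide.push_back(static_cast<double>(v));
  return out;
}

// Sketch of the load path: deserialize, then convert only if dtypes differ.
Tensor LoadTensor(const std::vector<float> &serialized, bool load_as_fp16) {
  Tensor t;
  t.f32 = serialized;        // stands in for DeserializeFromStream(fin, ...)
  t.lod = {{0, 1, 2, 3}};
  if (!load_as_fp16) return t;  // dtypes already match: return as loaded
  return TransDataType(t);      // otherwise hand back the converted tensor
}

int main() {
  Tensor out = LoadTensor({0.f, 1.f, 2.f}, /*load_as_fp16=*/true);
  std::cout << out.wide[2] << ", lod size " << out.lod[0].size() << "\n";
  return 0;
}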
paddle/fluid/operators/load_op.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <fstream>
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/profiler.h"
@@ -46,6 +47,27 @@ class LoadOp : public framework::OperatorBase {
    auto *tensor = out_var->GetMutable<framework::LoDTensor>();

    DeserializeFromStream(fin, tensor, *dev_ctx);

    auto load_as_fp16 = Attr<bool>("load_as_fp16");
    auto in_dtype = framework::ToDataType(tensor->type());
    auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;

    if (in_dtype != out_dtype) {
      // convert to float16 tensor
      auto in_kernel_type = framework::OpKernelType(in_dtype, place);
      auto out_kernel_type = framework::OpKernelType(out_dtype, place);
      framework::LoDTensor fp16_tensor;
      // copy LoD info to the new tensor
      fp16_tensor.set_lod(tensor->lod());
      framework::TransDataType(in_kernel_type, out_kernel_type, *tensor,
                               &fp16_tensor);

      // reset output tensor
      out_var->Clear();
      tensor = out_var->GetMutable<framework::LoDTensor>();
      tensor->set_lod(fp16_tensor.lod());
      tensor->ShareDataWith(fp16_tensor);
    }
  }
};
@@ -54,6 +76,13 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  LoadOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddOutput("Out", "(Tensor) The tensor need to be loaded");
    AddAttr<bool>("load_as_fp16",
                  "(boolean, default false)"
                  "If true, the tensor will be first loaded and then "
                  "converted to float16 data type. Otherwise, the tensor will be "
                  "directly loaded without data type conversion.")
        .SetDefault(false);
    AddAttr<std::string>("file_path",
                         "(string) "
                         "Variable will be loaded from \"file_path\".")
paddle/fluid/operators/save_load_op_test.cc
@@ -63,7 +63,7 @@ TEST(SaveLoadOp, CPU) {
  }
}

-TEST(SaveLoadFP16Op, CPU) {
+TEST(SaveFP16Op, CPU) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;
@@ -94,3 +94,52 @@ TEST(SaveLoadFP16Op, CPU) {
    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
  }
}

TEST(LoadFP16Op, CPU) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;

  auto var = scope.Var("test_var");
  auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
  tensor->Resize({3, 10});

  paddle::framework::LoD expect_lod;
  expect_lod.resize(1);
  expect_lod[0].push_back(0);
  expect_lod[0].push_back(1);
  expect_lod[0].push_back(2);
  expect_lod[0].push_back(3);

  tensor->set_lod(expect_lod);
  float *expect = tensor->mutable_data<float>(place);
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    expect[i] = static_cast<float>(paddle::platform::float16(i));
  }

  paddle::framework::AttributeMap attrs;
  attrs.insert({"file_path", std::string("tensor.save")});
  attrs.insert({"load_as_fp16", true});

  auto save_op = paddle::framework::OpRegistry::CreateOp(
      "save", {{"X", {"test_var"}}}, {}, attrs);
  save_op->Run(scope, place);

  auto load_var = scope.Var("out_var");
  auto load_op = paddle::framework::OpRegistry::CreateOp(
      "load", {}, {{"Out", {"out_var"}}}, attrs);
  load_op->Run(scope, place);

  auto target = load_var->Get<paddle::framework::LoDTensor>();
  paddle::platform::float16 *actual = target.data<paddle::platform::float16>();
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
  }

  auto &actual_lod = target.lod();
  EXPECT_EQ(expect_lod.size(), actual_lod.size());
  for (size_t i = 0; i < expect_lod.size(); ++i) {
    for (size_t j = 0; j < expect_lod[i].size(); ++j) {
      EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]);
    }
  }
}
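For reference, driving the new attribute outside the gtest harness condenses to the calls below, lifted from the LoadFP16Op test above. The header paths for Scope and CPUPlace and the assumption that "tensor.save" already exists on disk (and that the load op is linked into the binary) are not part of this diff, so treat this as a sketch rather than a verified standalone program.

#include <string>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/scope.h"  // assumed header path for Scope
#include "paddle/fluid/platform/place.h"   // assumed header path for CPUPlace

int main() {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;

  // Ask the load op to read "tensor.save" and materialize it as float16.
  paddle::framework::AttributeMap attrs;
  attrs.insert({"file_path", std::string("tensor.save")});
  attrs.insert({"load_as_fp16", true});

  // Create the output variable, then run the op; as in the test above,
  // "out_var" afterwards holds a float16 LoDTensor.
  scope.Var("out_var");
  auto load_op = paddle::framework::OpRegistry::CreateOp(
      "load", {}, {{"Out", {"out_var"}}}, attrs);
  load_op->Run(scope, place);
  return 0;
}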