Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleHub
提交
d893810f
P
PaddleHub
项目概览
PaddlePaddle
/
PaddleHub
大约 1 年 前同步成功
通知
281
Star
12117
Fork
2091
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
200
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleHub
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
200
Issue
200
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
d893810f
编写于
1月 15, 2019
作者:
W
wuzewu
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
process muti signature
上级
a7967cef
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
31 additions
and
36 deletions
+31
-36
paddle_hub/module.py
paddle_hub/module.py
+31
-36
未找到文件。
paddle_hub/module.py
浏览文件 @
d893810f
...
@@ -74,33 +74,6 @@ class Module(object):
...
@@ -74,33 +74,6 @@ class Module(object):
self
.
module_name
=
module_dir
.
split
(
"/"
)[
-
1
]
self
.
module_name
=
module_dir
.
split
(
"/"
)[
-
1
]
#TODO(ZeyuChen) add more check about loading module from local path
#TODO(ZeyuChen) add more check about loading module from local path
# load paddle inference model
place
=
fluid
.
CPUPlace
()
model_dir
=
os
.
path
.
join
(
self
.
module_dir
,
MODEL_DIRNAME
)
self
.
exe
=
fluid
.
Executor
(
fluid
.
CPUPlace
())
[
self
.
inference_program
,
self
.
feed_target_names
,
self
.
fetch_targets
]
=
fluid
.
io
.
load_inference_model
(
dirname
=
model_dir
,
executor
=
self
.
exe
)
# remove feed fetch operator and variable
ModuleUtils
.
remove_feed_fetch_op
(
self
.
inference_program
)
# print("inference_program")
# print(self.inference_program)
print
(
"**feed_target_names**
\n
{}"
.
format
(
self
.
feed_target_names
))
print
(
"**fetch_targets**
\n
{}"
.
format
(
self
.
fetch_targets
))
self
.
config
=
ModuleConfig
(
self
.
module_dir
)
self
.
config
.
load
()
self
.
_process_parameter
()
#TODO(wuzewu): recover the default unique name generator someother where
self
.
_process_uqn
()
def _process_uqn(self):
    """Restore the unique-name generator state saved with this module.

    Reads the pickled name-generator file from the module directory and
    switches Paddle's global unique-name generator to it so that variable
    names created afterwards continue the module's naming sequence.
    """
    # Path of the pickled generator inside this module's directory.
    generator_file = ModuleConfig.name_generator_path(self.module_dir)
    # NOTE(review): pickle.load on a file shipped with the module —
    # only safe for trusted module packages.
    with open(generator_file, "rb") as handle:
        saved_generator = pickle.load(handle)
    fluid.unique_name.switch(saved_generator)
def
_process_parameter
(
self
):
def
_process_parameter
(
self
):
global_block
=
self
.
inference_program
.
global_block
()
global_block
=
self
.
inference_program
.
global_block
()
param_path
=
ModuleConfig
.
meta_param_path
(
self
.
module_dir
)
param_path
=
ModuleConfig
.
meta_param_path
(
self
.
module_dir
)
...
@@ -133,12 +106,33 @@ class Module(object):
...
@@ -133,12 +106,33 @@ class Module(object):
if
op
.
has_attr
(
"is_test"
):
if
op
.
has_attr
(
"is_test"
):
op
.
_set_attr
(
"is_test"
,
is_test
)
op
.
_set_attr
(
"is_test"
,
is_test
)
# load paddle inference model
place
=
fluid
.
CPUPlace
()
model_dir
=
os
.
path
.
join
(
self
.
module_dir
,
MODEL_DIRNAME
)
self
.
exe
=
fluid
.
Executor
(
fluid
.
CPUPlace
())
self
.
inference_program
,
self
.
feed_target_names
,
self
.
fetch_targets
=
fluid
.
io
.
load_inference_model
(
dirname
=
os
.
path
.
join
(
model_dir
,
sign_name
,
executor
=
self
.
exe
))
# remove feed fetch operator and variable
ModuleUtils
.
remove_feed_fetch_op
(
self
.
inference_program
)
# print("inference_program")
# print(self.inference_program)
print
(
"**feed_target_names**
\n
{}"
.
format
(
self
.
feed_target_names
))
print
(
"**fetch_targets**
\n
{}"
.
format
(
self
.
fetch_targets
))
self
.
config
=
ModuleConfig
(
self
.
module_dir
)
self
.
config
.
load
()
self
.
_process_parameter
()
name_generator_path
=
ModuleConfig
.
name_generator_path
(
self
.
module_dir
)
with
open
(
name_generator_path
,
"rb"
)
as
data
:
generator
=
pickle
.
load
(
data
)
program
=
self
.
get_inference_program
().
clone
()
program
=
self
.
get_inference_program
().
clone
()
_process_op_attr
(
program
=
program
,
is_test
=
False
)
_process_op_attr
(
program
=
program
,
is_test
=
False
)
_set_param_trainable
(
program
=
program
,
trainable
=
trainable
)
_set_param_trainable
(
program
=
program
,
trainable
=
trainable
)
return
self
.
feed_target_names
,
self
.
fetch_targets
,
program
return
self
.
feed_target_names
,
self
.
fetch_targets
,
program
,
generator
def get_inference_program(self):
    """Return the inference Program loaded for this module."""
    program = self.inference_program
    return program
...
@@ -323,14 +317,15 @@ def create_module(sign_arr, program, module_dir=None, word_dict=None):
...
@@ -323,14 +317,15 @@ def create_module(sign_arr, program, module_dir=None, word_dict=None):
exe
=
fluid
.
Executor
(
place
=
fluid
.
CPUPlace
())
exe
=
fluid
.
Executor
(
place
=
fluid
.
CPUPlace
())
model_dir
=
os
.
path
.
join
(
module_dir
,
"model"
)
model_dir
=
os
.
path
.
join
(
module_dir
,
"model"
)
mkdir
(
model_dir
)
mkdir
(
model_dir
)
# TODO(ZeyuChen): here only deal with one signature
# TODO(wuzewu): save paddle model with a more effective way
first_sign
=
sign_arr
[
0
]
for
sign
in
sign_arr
:
fluid
.
io
.
save_inference_model
(
save_model_dir
=
os
.
path
.
join
(
model_dir
,
sign
.
get_name
())
model_dir
,
fluid
.
io
.
save_inference_model
(
feeded_var_names
=
[
var
.
name
for
var
in
first_sign
.
get_inputs
()],
save_model_dir
,
target_vars
=
first_sign
.
get_outputs
(),
feeded_var_names
=
[
var
.
name
for
var
in
sign
.
get_inputs
()],
main_program
=
program
,
target_vars
=
sign
.
get_outputs
(),
executor
=
exe
)
main_program
=
program
,
executor
=
exe
)
# save to disk
# save to disk
data
=
module
.
SerializeToString
()
data
=
module
.
SerializeToString
()
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录