PaddlePaddle / PaddleHub

Commit 62fd63c9
Authored on Jan 03, 2019 by Zeyu Chen

add module_desc.proto and fix senta example usage issues

Parent: 5373f2d9
Changes: 3 files changed, 18 additions and 16 deletions (+18 −16)

Senta/sentiment_classify.py    +9 −10
paddle_hub/__init__.py         +1 −0
paddle_hub/module.py           +8 −6
Senta/sentiment_classify.py

@@ -152,10 +152,12 @@ def train_net(train_reader,
                        (pass_id, avg_acc, avg_cost))
 
         # save the model
-        module_path = os.path.join(save_dirname, network_name)
-        hub.ModuleDesc.save_module_dict(
-            module_path=module_path, word_dict=word_dict)
-        fluid.io.save_inference_model(module_path, ["words"], emb, exe)
+        module_dir = os.path.join(save_dirname, network_name)
+        fluid.io.save_inference_model(module_dir, ["words"], emb, exe)
+        config = hub.ModuleConfig(module_dir)
+        config.save_dict(word_dict=word_dict)
+        config.dump()
 
 
 def retrain_net(train_reader,
@@ -209,10 +211,7 @@ def retrain_net(train_reader,
     #TODO(ZeyuChen): how to get output paramter according to proto config
     emb = module.get_module_output()
-    print(
-        "adfjkajdlfjoqi jqiorejlmsfdlkjoi jqwierjoajsdklfjoi qjerijoajdfiqwjeor adfkalsf"
-    )
-    # # # embedding layer
+    # # embedding layer
     # emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
     # #input=data, size=[dict_dim, emb_dim], param_attr="bow_embedding")
     # # bow layer
@@ -264,8 +263,8 @@ def retrain_net(train_reader,
     # print("senta_load_module", fluid.default_main_program())
 
     # save the model
-    module_path = os.path.join(save_dirname, network_name + "_retrain")
-    fluid.io.save_inference_model(module_path, ["words"], emb, exe)
+    module_dir = os.path.join(save_dirname, network_name + "_retrain")
+    fluid.io.save_inference_model(module_dir, ["words"], emb, exe)
 
 
 def eval_net(test_reader, use_gpu, model_path=None):
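
Taken together, the train_net hunk replaces the old hub.ModuleDesc.save_module_dict call with the ModuleConfig save path. A minimal sketch of the new flow, using only names that appear in this diff (save_dirname, network_name, word_dict, emb, and exe are assumed to be defined earlier in sentiment_classify.py):

import os
import paddle.fluid as fluid
import paddle_hub as hub

# Directory that will hold the exported module.
module_dir = os.path.join(save_dirname, network_name)

# Persist the inference program and parameters first ...
fluid.io.save_inference_model(module_dir, ["words"], emb, exe)

# ... then record the vocabulary through the new ModuleConfig API.
config = hub.ModuleConfig(module_dir)
config.save_dict(word_dict=word_dict)
config.dump()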
paddle_hub/__init__.py

@@ -6,4 +6,5 @@ import paddle.fluid as fluid
 from paddle_hub.module import Module
 from paddle_hub.module import ModuleConfig
+from paddle_hub.module import ModuleUtils
 from paddle_hub.downloader import download_and_uncompress
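
With ModuleUtils re-exported here, callers can reach it from the package root instead of paddle_hub.module. A quick sanity check, assuming this revision of paddle_hub is importable:

import paddle_hub as hub
from paddle_hub import Module, ModuleConfig, ModuleUtils

# All three names now resolve to classes defined in paddle_hub.module.
print(Module, ModuleConfig, ModuleUtils)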
paddle_hub/module.py

@@ -46,15 +46,16 @@ class Module(object):
                 module_url)
         else:
             # otherwise it's local path, no need to deal with it
+            print("Module.__init__", module_url)
             self.module_dir = module_url
-            self.module_name = module_url.split()[-1]
+            self.module_name = module_url.split("/")[-1]
 
         # load paddle inference model
         place = fluid.CPUPlace()
         self.exe = fluid.Executor(fluid.CPUPlace())
         [self.inference_program, self.feed_target_names,
          self.fetch_targets] = fluid.io.load_inference_model(
-             dirname=module_dir, executor=self.exe)
+             dirname=self.module_dir, executor=self.exe)
 
         print("inference_program")
         print(self.inference_program)
@@ -63,8 +64,8 @@ class Module(object):
         print("fetch_targets")
         print(self.fetch_targets)
 
-        config = ModuleConfig()
-        config.load(self.module_dir)
+        config = ModuleConfig(self.module_dir)
+        config.load()
         # load assets
         # self.dict = defaultdict(int)
         # self.dict.setdefault(0)
@@ -188,11 +189,12 @@ class ModuleConfig(object):
             self.dict = defaultdict(int)
             self.dict.setdefault(0)
 
-    def load(self, module_dir):
+    def load(self):
         """load module config from module dir
         """
         #TODO(ZeyuChen): check module_desc.pb exsitance
-        with open(pb_file_path, "rb") as fi:
+        pb_path = os.path.join(self.module_dir, "module_desc.pb")
+        with open(pb_path, "rb") as fi:
             self.desc.ParseFromString(fi.read())
             if self.desc.contain_assets:
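
The three module.py hunks make the module directory the single source of truth: Module derives module_name with split("/") instead of split(), load_inference_model reads from self.module_dir, and ModuleConfig is now constructed with the directory and locates module_desc.pb on its own. A hedged usage sketch matching the Senta example above; the "./models/bow_net" path is hypothetical, and the call assumes a local path is passed as the module_url argument seen in __init__:

import paddle_hub as hub

# A local path is taken as-is; module_name becomes the last path component.
module = hub.Module(module_url="./models/bow_net")

# Internally, Module now does roughly:
#     config = ModuleConfig(self.module_dir)
#     config.load()   # parses <module_dir>/module_desc.pb and, when
#                     # desc.contain_assets is set, loads the bundled dict
emb = module.get_module_output()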