BaiXuePrincess / PaddleRec (forked from PaddlePaddle / PaddleRec)
Commit 93e5453c
Committed on May 29, 2020 by frankwhzhang
Parent: 66be4d32

fix gru4rec
Showing 3 changed files with 85 additions and 127 deletions (+85 -127):

models/recall/gru4rec/config.yaml             +50  -38
models/recall/gru4rec/model.py                +35  -47
models/recall/gru4rec/rsc15_infer_reader.py    +0  -42  (deleted)
models/recall/gru4rec/config.yaml

@@ -12,31 +12,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-evaluate:
-  reader:
-    batch_size: 1
-    class: "{workspace}/rsc15_infer_reader.py"
-    test_data_path: "{workspace}/data/train"
-    is_return_numpy: False
-  workspace: "paddlerec.models.recall.gru4rec"
-
-train:
-  trainer:
-    # for cluster training
-    strategy: "async"
-  epochs: 3
-  workspace: "paddlerec.models.recall.gru4rec"
-  device: cpu
-
-  reader:
-    batch_size: 5
-    class: "{workspace}/rsc15_reader.py"
-    train_data_path: "{workspace}/data/train"
-
-  model:
-    models: "{workspace}/model.py"
-    hyper_parameters:
-      vocab_size: 1000
-      hid_size: 100
-      emb_lr_x: 10.0
+workspace: "paddlerec.models.recall.gru4rec"
+
+dataset:
+- name: dataset_train
+  batch_size: 5
+  type: QueueDataset
+  data_path: "{workspace}/data/train"
+  data_converter: "{workspace}/rsc15_reader.py"
+- name: dataset_infer
+  batch_size: 5
+  type: QueueDataset
+  data_path: "{workspace}/data/test"
+  data_converter: "{workspace}/rsc15_reader.py"
+
+hyper_parameters:
+  vocab_size: 1000
+  hid_size: 100
+  emb_lr_x: 10.0
@@ -44,15 +34,37 @@ train:
   fc_lr_x: 1.0
   init_low_bound: -0.04
   init_high_bound: 0.04
-  learning_rate: 0.01
-  optimizer: adagrad
-
-  save:
-    increment:
-      dirname: "increment"
-      epoch_interval: 2
-      save_last: True
-    inference:
-      dirname: "inference"
-      epoch_interval: 4
-      save_last: True
+  optimizer:
+    class: adagrad
+    learning_rate: 0.01
+    strategy: async
+
+#use infer_runner mode and modify 'phase' below if infer
+mode: train_runner
+#mode: infer_runner
+
+runner:
+- name: train_runner
+  class: single_train
+  device: cpu
+  epochs: 3
+  save_checkpoint_interval: 2
+  save_inference_interval: 4
+  save_checkpoint_path: "increment"
+  save_inference_path: "inference"
+  print_interval: 10
+- name: infer_runner
+  class: single_infer
+  init_model_path: "increment/0"
+  device: cpu
+  epochs: 3
+
+phase:
+- name: train
+  model: "{workspace}/model.py"
+  dataset_name: dataset_train
+  thread_num: 1
+#- name: infer
+#  model: "{workspace}/model.py"
+#  dataset_name: dataset_infer
+#  thread_num: 1
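The new layout moves run configuration into data: "mode" names an entry in the "runner" list, and each "phase" binds a model to one of the declared datasets. A minimal, framework-free sketch of how the resulting file resolves (PyYAML only; this script is illustrative and not part of PaddleRec):

# Illustrative only: resolve the active runner and phases from the
# new-style config.yaml. Assumes PyYAML is installed and the path
# matches this repository's layout.
import yaml

with open("models/recall/gru4rec/config.yaml") as f:
    conf = yaml.safe_load(f)

# "mode" selects one entry from the "runner" list (train_runner here).
runner = next(r for r in conf["runner"] if r["name"] == conf["mode"])
print(runner["class"], runner["epochs"])  # single_train 3

# Each phase binds model.py to one of the declared datasets.
for phase in conf["phase"]:
    print(phase["name"], "->", phase["dataset_name"])  # train -> dataset_train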
models/recall/gru4rec/model.py

@@ -22,84 +22,72 @@ class Model(ModelBase):
     def __init__(self, config):
         ModelBase.__init__(self, config)
 
-    def all_vocab_network(self, is_infer=False):
-        """ network definition """
-        recall_k = envs.get_global_env("hyper_parameters.recall_k", None,
-                                       self._namespace)
-        vocab_size = envs.get_global_env("hyper_parameters.vocab_size", None,
-                                         self._namespace)
-        hid_size = envs.get_global_env("hyper_parameters.hid_size", None,
-                                       self._namespace)
-        init_low_bound = envs.get_global_env(
-            "hyper_parameters.init_low_bound", None, self._namespace)
-        init_high_bound = envs.get_global_env(
-            "hyper_parameters.init_high_bound", None, self._namespace)
-        emb_lr_x = envs.get_global_env("hyper_parameters.emb_lr_x", None,
-                                       self._namespace)
-        gru_lr_x = envs.get_global_env("hyper_parameters.gru_lr_x", None,
-                                       self._namespace)
-        fc_lr_x = envs.get_global_env("hyper_parameters.fc_lr_x", None,
-                                      self._namespace)
+    def _init_hyper_parameters(self):
+        self.recall_k = envs.get_global_env("hyper_parameters.recall_k")
+        self.vocab_size = envs.get_global_env("hyper_parameters.vocab_size")
+        self.hid_size = envs.get_global_env("hyper_parameters.hid_size")
+        self.init_low_bound = envs.get_global_env(
+            "hyper_parameters.init_low_bound")
+        self.init_high_bound = envs.get_global_env(
+            "hyper_parameters.init_high_bound")
+        self.emb_lr_x = envs.get_global_env("hyper_parameters.emb_lr_x")
+        self.gru_lr_x = envs.get_global_env("hyper_parameters.gru_lr_x")
+        self.fc_lr_x = envs.get_global_env("hyper_parameters.fc_lr_x")
 
+    def input_data(self, is_infer=False, **kwargs):
         # Input data
         src_wordseq = fluid.data(
             name="src_wordseq", shape=[None, 1], dtype="int64", lod_level=1)
         dst_wordseq = fluid.data(
             name="dst_wordseq", shape=[None, 1], dtype="int64", lod_level=1)
-
-        if is_infer:
-            self._infer_data_var = [src_wordseq, dst_wordseq]
-            self._infer_data_loader = fluid.io.DataLoader.from_generator(
-                feed_list=self._infer_data_var,
-                capacity=64,
-                use_double_buffer=False,
-                iterable=False)
+        return [src_wordseq, dst_wordseq]
 
+    def net(self, inputs, is_infer=False):
+        src_wordseq = inputs[0]
+        dst_wordseq = inputs[1]
+
         emb = fluid.embedding(
             input=src_wordseq,
-            size=[vocab_size, hid_size],
+            size=[self.vocab_size, self.hid_size],
             param_attr=fluid.ParamAttr(
                 name="emb",
                 initializer=fluid.initializer.Uniform(
-                    low=init_low_bound, high=init_high_bound),
-                learning_rate=emb_lr_x),
+                    low=self.init_low_bound, high=self.init_high_bound),
+                learning_rate=self.emb_lr_x),
             is_sparse=True)
         fc0 = fluid.layers.fc(
             input=emb,
-            size=hid_size * 3,
+            size=self.hid_size * 3,
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Uniform(
-                    low=init_low_bound, high=init_high_bound),
-                learning_rate=gru_lr_x))
+                    low=self.init_low_bound, high=self.init_high_bound),
+                learning_rate=self.gru_lr_x))
         gru_h0 = fluid.layers.dynamic_gru(
             input=fc0,
-            size=hid_size,
+            size=self.hid_size,
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Uniform(
-                    low=init_low_bound, high=init_high_bound),
-                learning_rate=gru_lr_x))
+                    low=self.init_low_bound, high=self.init_high_bound),
+                learning_rate=self.gru_lr_x))
         fc = fluid.layers.fc(
             input=gru_h0,
-            size=vocab_size,
+            size=self.vocab_size,
             act='softmax',
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Uniform(
-                    low=init_low_bound, high=init_high_bound),
-                learning_rate=fc_lr_x))
+                    low=self.init_low_bound, high=self.init_high_bound),
+                learning_rate=self.fc_lr_x))
         cost = fluid.layers.cross_entropy(input=fc, label=dst_wordseq)
-        acc = fluid.layers.accuracy(input=fc, label=dst_wordseq, k=recall_k)
+        acc = fluid.layers.accuracy(
+            input=fc, label=dst_wordseq, k=self.recall_k)
         if is_infer:
             self._infer_results['recall20'] = acc
             return
         avg_cost = fluid.layers.mean(x=cost)
-        self._data_var.append(src_wordseq)
-        self._data_var.append(dst_wordseq)
         self._cost = avg_cost
         self._metrics["cost"] = avg_cost
         self._metrics["acc"] = acc
-
-    def train_net(self):
-        self.all_vocab_network()
-
-    def infer_net(self):
-        self.all_vocab_network(is_infer=True)
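Besides splitting all_vocab_network into _init_hyper_parameters, input_data, and net, the refactor simplifies every envs.get_global_env call site: the old code passed a default value and self._namespace, the new code passes only the key. A small framework-free sketch of that flattened lookup (GLOBAL_ENVS and the get_global_env below are hypothetical stand-ins, not paddlerec.core.utils.envs itself):

# Hypothetical stand-in for PaddleRec's global env store, showing the
# flat "hyper_parameters.*" lookup the new call sites rely on.
GLOBAL_ENVS = {
    "hyper_parameters.recall_k": 20,      # assumed value; not shown in this diff
    "hyper_parameters.vocab_size": 1000,  # from config.yaml above
    "hyper_parameters.hid_size": 100,     # from config.yaml above
}

def get_global_env(key, default=None):
    # New-style call sites pass only the key; no namespace argument.
    return GLOBAL_ENVS.get(key, default)

print(get_global_env("hyper_parameters.vocab_size"))  # 1000
print(get_global_env("hyper_parameters.recall_k"))    # 20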
models/recall/gru4rec/rsc15_infer_reader.py (deleted, file mode 100644 → 0)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from paddlerec.core.reader import Reader


class EvaluateReader(Reader):
    def init(self):
        pass

    def generate_sample(self, line):
        """
        Read the data line by line and process it as a dictionary
        """

        def reader():
            """
            This function needs to be implemented by the user, based on data format
            """
            l = line.strip().split()
            l = [w for w in l]
            src_seq = l[:len(l) - 1]
            src_seq = [int(e) for e in src_seq]
            trg_seq = l[1:]
            trg_seq = [int(e) for e in trg_seq]
            feature_name = ["src_wordseq", "dst_wordseq"]
            yield zip(feature_name, [src_seq] + [trg_seq])

        return reader
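With both datasets in config.yaml now pointing at rsc15_reader.py, this separate infer reader had become redundant, which presumably motivated the deletion. The next-item split it performed is easy to check in plain Python:

# Reproduces the deleted reader's core logic on one sample session line
# (item IDs are made up): the source sequence drops the last item and
# the target sequence is the same session shifted left by one.
line = "11 12 13 14"
l = line.strip().split()
src_seq = [int(e) for e in l[:len(l) - 1]]  # [11, 12, 13]
trg_seq = [int(e) for e in l[1:]]           # [12, 13, 14]
print(list(zip(["src_wordseq", "dst_wordseq"], [src_seq, trg_seq])))
# [('src_wordseq', [11, 12, 13]), ('dst_wordseq', [12, 13, 14])]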