PaddlePaddle / PaddleRec

Commit 0085a4f2
Authored May 29, 2020 by malin10
update multiview-simnet
Parent: f4ace1bf

Showing 11 changed files with 284 additions and 395 deletions (+284, -395)
core/model.py                                +5   -5
core/trainers/single_infer.py                +1   -0
core/trainers/single_trainer.py              +1   -7
core/utils/dataset_instance.py               +7   -1
models/match/dssm/config.yaml                +2   -1
models/match/multiview-simnet/config.yaml    +65  -41
models/match/multiview-simnet/model.py       +67  -181
models/recall/gnn/config.yaml                +63  -38
models/recall/gnn/evaluate_reader.py         +2   -3
models/recall/gnn/model.py                   +70  -115
models/recall/gnn/reader.py                  +1   -3
core/model.py
@@ -149,11 +149,11 @@ class Model(object):
         return optimizer_i
 
     def optimizer(self):
-        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
-                                            None, self._namespace)
-        optimizer = envs.get_global_env("hyper_parameters.optimizer", None,
-                                        self._namespace)
-        return self._build_optimizer(optimizer, learning_rate)
+        opt_name = envs.get_global_env("hyper_parameters.optimizer.class")
+        opt_lr = envs.get_global_env("hyper_parameters.optimizer.learning_rate")
+        opt_strategy = envs.get_global_env("hyper_parameters.optimizer.strategy")
+        return self._build_optimizer(opt_name, opt_lr, opt_strategy)
 
     def input_data(self, is_infer=False, **kwargs):
         name = "dataset." + kwargs.get("dataset_name") + "."
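After this change, an optimizer is described by three nested keys under hyper_parameters.optimizer instead of two flat hyper-parameters scoped by namespace. A minimal sketch of the new lookup, using a plain dict in place of the framework's global env store (the dict and helper here are illustrative, not PaddleRec's implementation):

    # Illustrative stand-in for envs.get_global_env, showing the three
    # nested optimizer keys this commit introduces.
    _GLOBAL_ENVS = {
        "hyper_parameters.optimizer.class": "Adam",
        "hyper_parameters.optimizer.learning_rate": 0.0001,
        "hyper_parameters.optimizer.strategy": "async",
    }

    def get_global_env(key, default=None):
        return _GLOBAL_ENVS.get(key, default)

    opt_name = get_global_env("hyper_parameters.optimizer.class")
    opt_lr = get_global_env("hyper_parameters.optimizer.learning_rate")
    opt_strategy = get_global_env("hyper_parameters.optimizer.strategy")
    print(opt_name, opt_lr, opt_strategy)  # Adam 0.0001 async

Note that the namespace argument (self._namespace) is gone: keys are now absolute dotted paths from the YAML root.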
core/trainers/single_infer.py
@@ -167,6 +167,7 @@ class SingleInfer(TranspileTrainer):
             model = envs.lazy_instance_by_fliename(
                 model_path, "Model")(self._env)
             model._infer_data_var = model.input_data(
+                is_infer=True,
                 dataset_name=model_dict["dataset_name"])
             if envs.get_global_env("dataset." + dataset_name +
                                    ".type") == "DataLoader":
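The added is_infer=True keyword lets a single input_data implementation serve both training and inference. A schematic sketch of that branch (the class and slot names are hypothetical, not the repo's):

    class ExampleModel(object):
        # Hypothetical model illustrating the is_infer branch in input_data.
        def input_data(self, is_infer=False, **kwargs):
            dataset_name = kwargs.get("dataset_name")
            if is_infer:
                # inference feeds omit the training-only negative slot
                return ["q_slot", "pt_slot"]
            return ["q_slot", "pt_slot", "nt_slot"]

    model = ExampleModel()
    print(model.input_data(is_infer=True, dataset_name="dataset_infer"))
    print(model.input_data(dataset_name="dataset_train"))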
core/trainers/single_trainer.py
@@ -147,11 +147,6 @@ class SingleTrainer(TranspileTrainer):
         startup_program = fluid.Program()
         scope = fluid.Scope()
         dataset_name = model_dict["dataset_name"]
-        opt_name = envs.get_global_env("hyper_parameters.optimizer.class")
-        opt_lr = envs.get_global_env(
-            "hyper_parameters.optimizer.learning_rate")
-        opt_strategy = envs.get_global_env(
-            "hyper_parameters.optimizer.strategy")
         with fluid.program_guard(train_program, startup_program):
             with fluid.unique_name.guard():
                 with fluid.scope_guard(scope):
@@ -168,8 +163,7 @@ class SingleTrainer(TranspileTrainer):
                     self._get_dataloader(dataset_name,
                                          model._data_loader)
                     model.net(model._data_var, False)
-                    optimizer = model._build_optimizer(opt_name, opt_lr,
-                                                       opt_strategy)
+                    optimizer = model.optimizer()
                     optimizer.minimize(model._cost)
                     self._model[model_dict["name"]][0] = train_program
                     self._model[model_dict["name"]][1] = startup_program
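With the opt_* reads deleted here and moved behind Model.optimizer() in core/model.py above, optimizer construction is fully owned by the model; the trainer only calls optimizer() and minimizes. A toy sketch of the inversion (all names hypothetical):

    class ExampleModel(object):
        def _build_optimizer(self, name, lr, strategy):
            # stand-in for the framework's optimizer factory
            return "%s(lr=%s, strategy=%s)" % (name, lr, strategy)

        def optimizer(self):
            # the model resolves its own hyper-parameters, so the
            # trainer no longer needs to know any config keys
            return self._build_optimizer("Adam", 0.0001, "async")

    class ExampleTrainer(object):
        def train(self, model):
            optimizer = model.optimizer()  # was model._build_optimizer(...)
            print("minimize with", optimizer)

    ExampleTrainer().train(ExampleModel())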
core/utils/dataset_instance.py
@@ -14,7 +14,8 @@
 from __future__ import print_function
 
 import sys
+import yaml
+from paddlerec.core.utils import envs
 from paddlerec.core.utils.envs import lazy_instance_by_fliename
 from paddlerec.core.reader import SlotReader
@@ -38,6 +39,11 @@ else:
     yaml_abs_path = sys.argv[3]
 
+with open(yaml_abs_path, 'r') as rb:
+    config = yaml.load(rb.read(), Loader=yaml.FullLoader)
+envs.set_global_envs(config)
+envs.update_workspace()
+
 if reader_name != "SlotReader":
     reader_class = lazy_instance_by_fliename(reader_package, reader_name)
     reader = reader_class(yaml_abs_path)
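These lines make the standalone dataset process load config.yaml itself, so the dotted-path envs.get_global_env look-ups added to the readers below can resolve. A stand-alone sketch of just the loading step, assuming a config.yaml on disk (the path is a placeholder):

    import yaml

    yaml_abs_path = "config.yaml"  # placeholder path
    with open(yaml_abs_path, 'r') as rb:
        # FullLoader parses full YAML without constructing arbitrary
        # Python objects, unlike the deprecated bare yaml.load(stream)
        config = yaml.load(rb.read(), Loader=yaml.FullLoader)
    print(sorted(config.keys()))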
models/match/dssm/config.yaml
@@ -53,13 +53,14 @@ runner:
   save_inference_feed_varnames: ["query", "doc_pos"] # feed vars of save inference
   save_inference_fetch_varnames: ["cos_sim_0.tmp_0"] # fetch vars of save inference
   init_model_path: "" # load model path
-  fetch_period: 10
+  fetch_period: 2
 - name: runner2
   class: single_infer
   # num of epochs
   epochs: 1
   # device to run training or infer
   device: cpu
+  fetch_period: 1
   init_model_path: "increment/2" # load model path
 # runner will run all the phase in each epoch
models/match/multiview-simnet/config.yaml
@@ -11,49 +11,73 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-evaluate:
-  workspace: "paddlerec.models.match.multiview-simnet"
-  reader:
-    batch_size: 2
-    class: "{workspace}/evaluate_reader.py"
-    test_data_path: "{workspace}/data/test"
-
-train:
-  trainer:
-    # for cluster training
-    strategy: "async"
-  epochs: 2
-  workspace: "paddlerec.models.match.multiview-simnet"
-
-  reader:
-    batch_size: 2
-    class: "{workspace}/reader.py"
-    train_data_path: "{workspace}/data/train"
-    dataset_class: "DataLoader"
-
-  model:
-    models: "{workspace}/model.py"
-    hyper_parameters:
-      use_DataLoader: True
-      query_encoder: "bow"
-      title_encoder: "bow"
-      query_encode_dim: 128
-      title_encode_dim: 128
-      query_slots: 1
-      title_slots: 1
-      sparse_feature_dim: 1000001
-      embedding_dim: 128
-      hidden_size: 128
-      learning_rate: 0.0001
-      optimizer: adam
-
-  save:
-    increment:
-      dirname: "increment"
-      epoch_interval: 1
-      save_last: True
-    inference:
-      dirname: "inference"
-      epoch_interval: 1
-      save_last: True
+
+# workspace
+workspace: "paddlerec.models.match.multiview-simnet"
+
+# list of dataset
+dataset:
+- name: dataset_train # name of dataset to distinguish different datasets
+  batch_size: 2
+  type: DataLoader # or QueueDataset
+  data_path: "{workspace}/data/train"
+  sparse_slots: "1 2 3"
+- name: dataset_infer # name
+  batch_size: 2
+  type: DataLoader # or QueueDataset
+  data_path: "{workspace}/data/test"
+  sparse_slots: "1 2"
+
+# hyper parameters of user-defined network
+hyper_parameters:
+  optimizer:
+    class: Adam
+    learning_rate: 0.0001
+    strategy: async
+  query_encoder: "bow"
+  title_encoder: "bow"
+  query_encode_dim: 128
+  title_encode_dim: 128
+  sparse_feature_dim: 1000001
+  embedding_dim: 128
+  hidden_size: 128
+  margin: 0.1
+
+# select runner by name
+mode: runner1
+# config of each runner.
+# runner is a kind of paddle training class, which wraps the train/infer process.
+runner:
+- name: runner1
+  class: single_train
+  # num of epochs
+  epochs: 2
+  # device to run training or infer
+  device: cpu
+  save_checkpoint_interval: 1 # save model interval of epochs
+  save_inference_interval: 1 # save inference
+  save_checkpoint_path: "increment" # save checkpoint path
+  save_inference_path: "inference" # save inference path
+  save_inference_feed_varnames: [] # feed vars of save inference
+  save_inference_fetch_varnames: [] # fetch vars of save inference
+  init_model_path: "" # load model path
+  fetch_period: 1
+- name: runner2
+  class: single_infer
+  # num of epochs
+  epochs: 1
+  # device to run training or infer
+  device: cpu
+  fetch_period: 1
+  init_model_path: "increment/0" # load model path
+
+# runner will run all the phase in each epoch
+phase:
+- name: phase1
+  model: "{workspace}/model.py" # user-defined model
+  dataset_name: dataset_train # select dataset by name
+  thread_num: 1
+#- name: phase2
+#  model: "{workspace}/model.py" # user-defined model
+#  dataset_name: dataset_infer # select dataset by name
+#  thread_num: 1
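The sparse_slots: "1 2 3" declaration is what lets the rewritten model.py below slice its inputs positionally out of self._sparse_data_var. A rough sketch of that mapping, assuming the framework materializes one variable per declared slot in order (plain strings stand in for the fluid lod tensors it actually creates):

    sparse_slots = "1 2 3".split()          # dataset_train declaration
    _sparse_data_var = ["var_" + s for s in sparse_slots]

    q_slots = _sparse_data_var[0:1]   # slot "1": query
    pt_slots = _sparse_data_var[1:2]  # slot "2": positive title
    nt_slots = _sparse_data_var[2:3]  # slot "3": negative title (train only)
    print(q_slots, pt_slots, nt_slots)

The infer dataset declares only "1 2", which is why net() below computes cos_pos and returns early when is_infer is set.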
models/match/multiview-simnet/model.py
@@ -99,146 +99,88 @@ class SimpleEncoderFactory(object):
 class Model(ModelBase):
     def __init__(self, config):
         ModelBase.__init__(self, config)
-        self.init_config()
-
-    def init_config(self):
-        self._fetch_interval = 1
-        query_encoder = envs.get_global_env("hyper_parameters.query_encoder",
-                                            None, self._namespace)
-        title_encoder = envs.get_global_env("hyper_parameters.title_encoder",
-                                            None, self._namespace)
-        query_encode_dim = envs.get_global_env(
-            "hyper_parameters.query_encode_dim", None, self._namespace)
-        title_encode_dim = envs.get_global_env(
-            "hyper_parameters.title_encode_dim", None, self._namespace)
-        query_slots = envs.get_global_env("hyper_parameters.query_slots",
-                                          None, self._namespace)
-        title_slots = envs.get_global_env("hyper_parameters.title_slots",
-                                          None, self._namespace)
-        factory = SimpleEncoderFactory()
-        self.query_encoders = [
-            factory.create(query_encoder, query_encode_dim)
-            for i in range(query_slots)
-        ]
-        self.title_encoders = [
-            factory.create(title_encoder, title_encode_dim)
-            for i in range(title_slots)
-        ]
-        self.emb_size = envs.get_global_env(
-            "hyper_parameters.sparse_feature_dim", None, self._namespace)
-        self.emb_dim = envs.get_global_env("hyper_parameters.embedding_dim",
-                                           None, self._namespace)
-        self.emb_shape = [self.emb_size, self.emb_dim]
-        self.hidden_size = envs.get_global_env("hyper_parameters.hidden_size",
-                                               None, self._namespace)
-        self.margin = 0.1
-
-    def input(self, is_train=True):
-        self.q_slots = [
-            fluid.data(
-                name="%d" % i, shape=[None, 1], lod_level=1, dtype='int64')
-            for i in range(len(self.query_encoders))
-        ]
-        self.pt_slots = [
-            fluid.data(
-                name="%d" % (i + len(self.query_encoders)),
-                shape=[None, 1],
-                lod_level=1,
-                dtype='int64') for i in range(len(self.title_encoders))
-        ]
-
-        if is_train == False:
-            return self.q_slots + self.pt_slots
-
-        self.nt_slots = [
-            fluid.data(
-                name="%d" %
-                (i + len(self.query_encoders) + len(self.title_encoders)),
-                shape=[None, 1],
-                lod_level=1,
-                dtype='int64') for i in range(len(self.title_encoders))
-        ]
-
-        return self.q_slots + self.pt_slots + self.nt_slots
-
-    def train_input(self):
-        res = self.input()
-        self._data_var = res
-
-        use_dataloader = envs.get_global_env(
-            "hyper_parameters.use_DataLoader", False, self._namespace)
-
-        if self._platform != "LINUX" or use_dataloader:
-            self._data_loader = fluid.io.DataLoader.from_generator(
-                feed_list=self._data_var,
-                capacity=256,
-                use_double_buffer=False,
-                iterable=False)
-
-    def get_acc(self, x, y):
-        less = tensor.cast(cf.less_than(x, y), dtype='float32')
-        label_ones = fluid.layers.fill_constant_batch_size_like(
-            input=x, dtype='float32', shape=[-1, 1], value=1.0)
-        correct = fluid.layers.reduce_sum(less)
-        total = fluid.layers.reduce_sum(label_ones)
-        acc = fluid.layers.elementwise_div(correct, total)
-        return acc
-
-    def net(self):
-        q_embs = [
-            fluid.embedding(
-                input=query, size=self.emb_shape, param_attr="emb")
-            for query in self.q_slots
-        ]
-        pt_embs = [
-            fluid.embedding(
-                input=title, size=self.emb_shape, param_attr="emb")
-            for title in self.pt_slots
-        ]
-        nt_embs = [
-            fluid.embedding(
-                input=title, size=self.emb_shape, param_attr="emb")
-            for title in self.nt_slots
-        ]
-
-        # encode each embedding field with encoder
-        q_encodes = [
-            self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
-        ]
-        pt_encodes = [
-            self.title_encoders[i].forward(emb)
-            for i, emb in enumerate(pt_embs)
-        ]
-        nt_encodes = [
-            self.title_encoders[i].forward(emb)
-            for i, emb in enumerate(nt_embs)
-        ]
-
-        # concat multi view for query, pos_title, neg_title
-        q_concat = fluid.layers.concat(q_encodes)
-        pt_concat = fluid.layers.concat(pt_encodes)
-        nt_concat = fluid.layers.concat(nt_encodes)
-
-        # projection of hidden layer
-        q_hid = fluid.layers.fc(q_concat,
-                                size=self.hidden_size,
-                                param_attr='q_fc.w',
-                                bias_attr='q_fc.b')
-        pt_hid = fluid.layers.fc(pt_concat,
-                                 size=self.hidden_size,
-                                 param_attr='t_fc.w',
-                                 bias_attr='t_fc.b')
-        nt_hid = fluid.layers.fc(nt_concat,
-                                 size=self.hidden_size,
-                                 param_attr='t_fc.w',
-                                 bias_attr='t_fc.b')
-
-        # cosine of hidden layers
-        cos_pos = fluid.layers.cos_sim(q_hid, pt_hid)
-        cos_neg = fluid.layers.cos_sim(q_hid, nt_hid)
+
+    def _init_hyper_parameters(self):
+        self.query_encoder = envs.get_global_env(
+            "hyper_parameters.query_encoder")
+        self.title_encoder = envs.get_global_env(
+            "hyper_parameters.title_encoder")
+        self.query_encode_dim = envs.get_global_env(
+            "hyper_parameters.query_encode_dim")
+        self.title_encode_dim = envs.get_global_env(
+            "hyper_parameters.title_encode_dim")
+        self.emb_size = envs.get_global_env(
+            "hyper_parameters.sparse_feature_dim")
+        self.emb_dim = envs.get_global_env("hyper_parameters.embedding_dim")
+        self.emb_shape = [self.emb_size, self.emb_dim]
+        self.hidden_size = envs.get_global_env("hyper_parameters.hidden_size")
+        self.margin = envs.get_global_env("hyper_parameters.margin")
+
+    def net(self, input, is_infer=False):
+        factory = SimpleEncoderFactory()
+        self.q_slots = self._sparse_data_var[0:1]
+        self.query_encoders = [
+            factory.create(self.query_encoder, self.query_encode_dim)
+            for _ in self.q_slots
+        ]
+        q_embs = [
+            fluid.embedding(
+                input=query, size=self.emb_shape, param_attr="emb")
+            for query in self.q_slots
+        ]
+        # encode each embedding field with encoder
+        q_encodes = [
+            self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
+        ]
+        # concat multi view for query, pos_title, neg_title
+        q_concat = fluid.layers.concat(q_encodes)
+        # projection of hidden layer
+        q_hid = fluid.layers.fc(q_concat,
+                                size=self.hidden_size,
+                                param_attr='q_fc.w',
+                                bias_attr='q_fc.b')
+
+        self.pt_slots = self._sparse_data_var[1:2]
+        self.title_encoders = [
+            factory.create(self.title_encoder, self.title_encode_dim)
+        ]
+        pt_embs = [
+            fluid.embedding(
+                input=title, size=self.emb_shape, param_attr="emb")
+            for title in self.pt_slots
+        ]
+        pt_encodes = [
+            self.title_encoders[i].forward(emb)
+            for i, emb in enumerate(pt_embs)
+        ]
+        pt_concat = fluid.layers.concat(pt_encodes)
+        pt_hid = fluid.layers.fc(pt_concat,
+                                 size=self.hidden_size,
+                                 param_attr='t_fc.w',
+                                 bias_attr='t_fc.b')
+        # cosine of hidden layers
+        cos_pos = fluid.layers.cos_sim(q_hid, pt_hid)
+
+        if is_infer:
+            self._infer_results['query_pt_sim'] = cos_pos
+            return
+
+        self.nt_slots = self._sparse_data_var[2:3]
+        nt_embs = [
+            fluid.embedding(
+                input=title, size=self.emb_shape, param_attr="emb")
+            for title in self.nt_slots
+        ]
+        nt_encodes = [
+            self.title_encoders[i].forward(emb)
+            for i, emb in enumerate(nt_embs)
+        ]
+        nt_concat = fluid.layers.concat(nt_encodes)
+        nt_hid = fluid.layers.fc(nt_concat,
+                                 size=self.hidden_size,
+                                 param_attr='t_fc.w',
+                                 bias_attr='t_fc.b')
+        cos_neg = fluid.layers.cos_sim(q_hid, nt_hid)
 
         # pairwise hinge_loss
         loss_part1 = fluid.layers.elementwise_sub(
             tensor.fill_constant_batch_size_like(
                 input=cos_pos,
@@ -254,72 +196,16 @@ class Model(ModelBase):
                 input=loss_part2,
                 shape=[-1, 1],
                 value=0.0,
                 dtype='float32'),
             loss_part2)
-        self.avg_cost = fluid.layers.mean(loss_part3)
+        self._cost = fluid.layers.mean(loss_part3)
         self.acc = self.get_acc(cos_neg, cos_pos)
-
-    def avg_loss(self):
-        self._cost = self.avg_cost
-
-    def metrics(self):
-        self._metrics["loss"] = self.avg_cost
-        self._metrics["acc"] = self.acc
-
-    def train_net(self):
-        self.train_input()
-        self.net()
-        self.avg_loss()
-        self.metrics()
-
-    def optimizer(self):
-        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
-                                            None, self._namespace)
-        optimizer = fluid.optimizer.Adam(learning_rate=learning_rate)
-        return optimizer
-
-    def infer_input(self):
-        res = self.input(is_train=False)
-        self._infer_data_var = res
-
-        self._infer_data_loader = fluid.io.DataLoader.from_generator(
-            feed_list=self._infer_data_var,
-            capacity=64,
-            use_double_buffer=False,
-            iterable=False)
-
-    def infer_net(self):
-        self.infer_input()
-        # lookup embedding for each slot
-        q_embs = [
-            fluid.embedding(
-                input=query, size=self.emb_shape, param_attr="emb")
-            for query in self.q_slots
-        ]
-        pt_embs = [
-            fluid.embedding(
-                input=title, size=self.emb_shape, param_attr="emb")
-            for title in self.pt_slots
-        ]
-        # encode each embedding field with encoder
-        q_encodes = [
-            self.query_encoders[i].forward(emb) for i, emb in enumerate(q_embs)
-        ]
-        pt_encodes = [
-            self.title_encoders[i].forward(emb)
-            for i, emb in enumerate(pt_embs)
-        ]
-        # concat multi view for query, pos_title, neg_title
-        q_concat = fluid.layers.concat(q_encodes)
-        pt_concat = fluid.layers.concat(pt_encodes)
-        # projection of hidden layer
-        q_hid = fluid.layers.fc(q_concat,
-                                size=self.hidden_size,
-                                param_attr='q_fc.w',
-                                bias_attr='q_fc.b')
-        pt_hid = fluid.layers.fc(pt_concat,
-                                 size=self.hidden_size,
-                                 param_attr='t_fc.w',
-                                 bias_attr='t_fc.b')
-        # cosine of hidden layers
-        cos = fluid.layers.cos_sim(q_hid, pt_hid)
-        self._infer_results['query_pt_sim'] = cos
+        self._metrics["loss"] = self._cost
+        self._metrics["acc"] = self.acc
+
+    def get_acc(self, x, y):
+        less = tensor.cast(cf.less_than(x, y), dtype='float32')
+        label_ones = fluid.layers.fill_constant_batch_size_like(
+            input=x, dtype='float32', shape=[-1, 1], value=1.0)
+        correct = fluid.layers.reduce_sum(less)
+        total = fluid.layers.reduce_sum(label_ones)
+        acc = fluid.layers.elementwise_div(correct, total)
+        return acc
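In plain terms, the hinge-loss block penalizes the model whenever the positive-title similarity does not beat the negative-title similarity by at least margin, and get_acc reports the fraction of pairs where cos_neg < cos_pos. A sketch of the same arithmetic on toy similarities (the numbers are made up for illustration):

    margin = 0.1
    cos_pos = [0.8, 0.5, 0.3]   # query vs positive title
    cos_neg = [0.2, 0.45, 0.6]  # query vs negative title

    # hinge: max(0, margin - cos_pos + cos_neg), averaged over the batch
    losses = [max(0.0, margin - p + n) for p, n in zip(cos_pos, cos_neg)]
    cost = sum(losses) / len(losses)

    # get_acc counts pairs where the negative scores strictly lower
    acc = sum(1.0 for p, n in zip(cos_pos, cos_neg) if n < p) / len(cos_pos)
    print(cost, acc)  # 0.15 (mean of [0, 0.05, 0.4]), 0.666...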
models/recall/gnn/config.yaml
@@ -11,46 +11,71 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-evaluate:
-  workspace: "paddlerec.models.recall.gnn"
-  reader:
-    batch_size: 50
-    class: "{workspace}/evaluate_reader.py"
-    test_data_path: "{workspace}/data/test"
-
-train:
-  trainer:
-    # for cluster training
-    strategy: "async"
-  epochs: 2
-  workspace: "paddlerec.models.recall.gnn"
-
-  reader:
-    batch_size: 100
-    class: "{workspace}/reader.py"
-    train_data_path: "{workspace}/data/train"
-    dataset_class: "DataLoader"
-
-  model:
-    models: "{workspace}/model.py"
-    hyper_parameters:
-      use_DataLoader: True
-      config_path: "{workspace}/data/config.txt"
-      sparse_feature_dim: 100
-      gnn_propogation_steps: 1
-      learning_rate: 0.001
-      l2: 0.00001
-      decay_steps: 3
-      decay_rate: 0.1
-      optimizer: adam
-
-  save:
-    increment:
-      dirname: "increment"
-      epoch_interval: 1
-      save_last: True
-    inference:
-      dirname: "inference"
-      epoch_interval: 1
-      save_last: True
+
+# workspace
+workspace: "paddlerec.models.recall.gnn"
+
+# list of dataset
+dataset:
+- name: dataset_train # name of dataset to distinguish different datasets
+  batch_size: 100
+  type: DataLoader # or QueueDataset
+  data_path: "{workspace}/data/train"
+  data_converter: "{workspace}/reader.py"
+- name: dataset_infer # name
+  batch_size: 50
+  type: DataLoader # or QueueDataset
+  data_path: "{workspace}/data/test"
+  data_converter: "{workspace}/evaluate_reader.py"
+
+# hyper parameters of user-defined network
+hyper_parameters:
+  optimizer:
+    class: Adam
+    learning_rate: 0.001
+    decay_steps: 3
+    decay_rate: 0.1
+    l2: 0.00001
+  sparse_feature_nums: 43098
+  sparse_feature_dim: 100
+  corpus_size: 719470
+  gnn_propogation_steps: 1
+
+# select runner by name
+mode: runner1
+# config of each runner.
+# runner is a kind of paddle training class, which wraps the train/infer process.
+runner:
+- name: runner1
+  class: single_train
+  # num of epochs
+  epochs: 2
+  # device to run training or infer
+  device: cpu
+  save_checkpoint_interval: 1 # save model interval of epochs
+  save_inference_interval: 1 # save inference
+  save_checkpoint_path: "increment" # save checkpoint path
+  save_inference_path: "inference" # save inference path
+  save_inference_feed_varnames: [] # feed vars of save inference
+  save_inference_fetch_varnames: [] # fetch vars of save inference
+  init_model_path: "" # load model path
+  fetch_period: 10
+- name: runner2
+  class: single_infer
+  # num of epochs
+  epochs: 1
+  # device to run training or infer
+  device: cpu
+  fetch_period: 1
+  init_model_path: "increment/0" # load model path
+
+# runner will run all the phase in each epoch
+phase:
+- name: phase1
+  model: "{workspace}/model.py" # user-defined model
+  dataset_name: dataset_train # select dataset by name
+  thread_num: 1
+#- name: phase2
+#  model: "{workspace}/model.py" # user-defined model
+#  dataset_name: dataset_infer # select dataset by name
+#  thread_num: 1
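Same schema as the multiview-simnet config above; the top-level mode key picks which entry in the runner list executes. A toy sketch of that selection, with the config as a plain dict (the dict literal is illustrative, not the framework's loader output):

    config = {
        "mode": "runner1",
        "runner": [
            {"name": "runner1", "class": "single_train", "epochs": 2},
            {"name": "runner2", "class": "single_infer", "epochs": 1},
        ],
    }

    # pick the runner whose name matches "mode"
    active = next(r for r in config["runner"] if r["name"] == config["mode"])
    print(active["class"], active["epochs"])  # single_train 2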
models/recall/gnn/evaluate_reader.py
@@ -21,10 +21,9 @@ from paddlerec.core.reader import Reader
 from paddlerec.core.utils import envs
 
 
-class EvaluateReader(Reader):
+class TrainReader(Reader):
     def init(self):
-        self.batch_size = envs.get_global_env("batch_size", None,
-                                              "evaluate.reader")
+        self.batch_size = envs.get_global_env("dataset.dataset_infer.batch_size")
 
         self.input = []
         self.length = None
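Both GNN readers now resolve their batch size from the dataset section by dotted path rather than from a reader-specific namespace. A minimal sketch of dotted-path resolution, using a plain nested dict in place of the framework's global env store (the helper is illustrative; PaddleRec's actual store may be flattened):

    config = {"dataset": {"dataset_infer": {"batch_size": 50},
                          "dataset_train": {"batch_size": 100}}}

    def get_global_env(path, default=None, config=config):
        node = config
        for key in path.split("."):
            if not isinstance(node, dict) or key not in node:
                return default
            node = node[key]
        return node

    print(get_global_env("dataset.dataset_infer.batch_size"))  # 50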
models/recall/gnn/model.py
@@ -25,74 +25,59 @@ from paddlerec.core.model import Model as ModelBase
 class Model(ModelBase):
     def __init__(self, config):
         ModelBase.__init__(self, config)
-        self.init_config()
-
-    def init_config(self):
-        self._fetch_interval = 1
-        self.items_num, self.ins_num = self.config_read(
-            envs.get_global_env("hyper_parameters.config_path", None,
-                                self._namespace))
-        self.train_batch_size = envs.get_global_env("batch_size", None,
-                                                    "train.reader")
-        self.evaluate_batch_size = envs.get_global_env("batch_size", None,
-                                                       "evaluate.reader")
-        self.hidden_size = envs.get_global_env(
-            "hyper_parameters.sparse_feature_dim", None, self._namespace)
-        self.step = envs.get_global_env(
-            "hyper_parameters.gnn_propogation_steps", None, self._namespace)
-
-    def config_read(self, config_path=None):
-        if config_path is None:
-            raise ValueError(
-                "please set train.model.hyper_parameters.config_path at first")
-        with open(config_path, "r") as fin:
-            item_nums = int(fin.readline().strip())
-            ins_nums = int(fin.readline().strip())
-        return item_nums, ins_nums
-
-    def input(self, bs):
-        self.items = fluid.data(
+
+    def _init_hyper_parameters(self):
+        self.learning_rate = envs.get_global_env(
+            "hyper_parameters.optimizer.learning_rate")
+        self.decay_steps = envs.get_global_env(
+            "hyper_parameters.optimizer.decay_steps")
+        self.decay_rate = envs.get_global_env(
+            "hyper_parameters.optimizer.decay_rate")
+        self.l2 = envs.get_global_env("hyper_parameters.optimizer.l2")
+        self.dict_size = envs.get_global_env(
+            "hyper_parameters.sparse_feature_nums")
+        self.corpus_size = envs.get_global_env("hyper_parameters.corpus_size")
+        self.train_batch_size = envs.get_global_env(
+            "dataset.dataset_train.batch_size")
+        self.evaluate_batch_size = envs.get_global_env(
+            "dataset.dataset_infer.batch_size")
+        self.hidden_size = envs.get_global_env(
+            "hyper_parameters.sparse_feature_dim")
+        self.step = envs.get_global_env(
+            "hyper_parameters.gnn_propogation_steps")
+
+    def input_data(self, is_infer=False, **kwargs):
+        if is_infer:
+            bs = self.evaluate_batch_size
+        else:
+            bs = self.train_batch_size
+        items = fluid.data(
             name="items", shape=[bs, -1],
             dtype="int64")  # [batch_size, uniq_max]
-        self.seq_index = fluid.data(
+        seq_index = fluid.data(
             name="seq_index", shape=[bs, -1, 2],
             dtype="int32")  # [batch_size, seq_max, 2]
-        self.last_index = fluid.data(
+        last_index = fluid.data(
             name="last_index", shape=[bs, 2], dtype="int32")  # [batch_size, 2]
-        self.adj_in = fluid.data(
+        adj_in = fluid.data(
            name="adj_in", shape=[bs, -1, -1],
            dtype="float32")  # [batch_size, seq_max, seq_max]
-        self.adj_out = fluid.data(
+        adj_out = fluid.data(
            name="adj_out", shape=[bs, -1, -1],
            dtype="float32")  # [batch_size, seq_max, seq_max]
-        self.mask = fluid.data(
+        mask = fluid.data(
            name="mask", shape=[bs, -1, 1],
            dtype="float32")  # [batch_size, seq_max, 1]
-        self.label = fluid.data(
+        label = fluid.data(
            name="label", shape=[bs, 1], dtype="int64")  # [batch_size, 1]
-        res = [
-            self.items, self.seq_index, self.last_index, self.adj_in,
-            self.adj_out, self.mask, self.label
-        ]
+        res = [items, seq_index, last_index, adj_in, adj_out, mask, label]
         return res
 
-    def train_input(self):
-        res = self.input(self.train_batch_size)
-        self._data_var = res
-
-        use_dataloader = envs.get_global_env(
-            "hyper_parameters.use_DataLoader", False, self._namespace)
-
-        if self._platform != "LINUX" or use_dataloader:
-            self._data_loader = fluid.io.DataLoader.from_generator(
-                feed_list=self._data_var,
-                capacity=256,
-                use_double_buffer=False,
-                iterable=False)
-
-    def net(self, items_num, hidden_size, step, bs):
-        stdv = 1.0 / math.sqrt(hidden_size)
+    def net(self, inputs, is_infer=False):
+        if is_infer:
+            bs = self.evaluate_batch_size
+        else:
+            bs = self.train_batch_size
+
+        stdv = 1.0 / math.sqrt(self.hidden_size)
 
         def embedding_layer(input,
                             table_name,
@@ -100,22 +85,22 @@ class Model(ModelBase):
                             emb_dim,
                             initializer_instance=None):
             emb = fluid.embedding(
                 input=input,
-                size=[items_num, emb_dim],
+                size=[self.dict_size, emb_dim],
                 param_attr=fluid.ParamAttr(
-                    name=table_name, initializer=initializer_instance),
-            )
+                    name=table_name, initializer=initializer_instance))
             return emb
 
         sparse_initializer = fluid.initializer.Uniform(low=-stdv, high=stdv)
-        items_emb = embedding_layer(self.items, "emb", hidden_size,
-                                    sparse_initializer)
+        items_emb = embedding_layer(inputs[0], "emb", self.hidden_size,
+                                    sparse_initializer)
         pre_state = items_emb
-        for i in range(step):
+        for i in range(self.step):
             pre_state = layers.reshape(
-                x=pre_state, shape=[bs, -1, hidden_size])
+                x=pre_state, shape=[bs, -1, self.hidden_size])
             state_in = layers.fc(
                 input=pre_state,
                 name="state_in",
-                size=hidden_size,
+                size=self.hidden_size,
                 act=None,
                 num_flatten_dims=2,
                 param_attr=fluid.ParamAttr(
@@ -127,7 +112,7 @@ class Model(ModelBase):
             state_out = layers.fc(
                 input=pre_state,
                 name="state_out",
-                size=hidden_size,
+                size=self.hidden_size,
                 act=None,
                 num_flatten_dims=2,
                 param_attr=fluid.ParamAttr(
@@ -137,33 +122,32 @@ class Model(ModelBase):
                     initializer=fluid.initializer.Uniform(
                         low=-stdv, high=stdv)))  # [batch_size, uniq_max, h]
 
-            state_adj_in = layers.matmul(self.adj_in,
-                                         state_in)  # [batch_size, uniq_max, h]
-            state_adj_out = layers.matmul(
-                self.adj_out, state_out)  # [batch_size, uniq_max, h]
+            state_adj_in = layers.matmul(
+                inputs[3], state_in)  # [batch_size, uniq_max, h]
+            state_adj_out = layers.matmul(
+                inputs[4], state_out)  # [batch_size, uniq_max, h]
 
             gru_input = layers.concat([state_adj_in, state_adj_out], axis=2)
 
             gru_input = layers.reshape(
-                x=gru_input, shape=[-1, hidden_size * 2])
+                x=gru_input, shape=[-1, self.hidden_size * 2])
             gru_fc = layers.fc(input=gru_input,
                                name="gru_fc",
-                               size=3 * hidden_size,
+                               size=3 * self.hidden_size,
                                bias_attr=False)
             pre_state, _, _ = fluid.layers.gru_unit(
                 input=gru_fc,
                 hidden=layers.reshape(
-                    x=pre_state, shape=[-1, hidden_size]),
-                size=3 * hidden_size)
+                    x=pre_state, shape=[-1, self.hidden_size]),
+                size=3 * self.hidden_size)
 
-        final_state = layers.reshape(pre_state, shape=[bs, -1, hidden_size])
-        seq = layers.gather_nd(final_state, self.seq_index)
-        last = layers.gather_nd(final_state, self.last_index)
+        final_state = layers.reshape(
+            pre_state, shape=[bs, -1, self.hidden_size])
+        seq = layers.gather_nd(final_state, inputs[1])
+        last = layers.gather_nd(final_state, inputs[2])
 
         seq_fc = layers.fc(
             input=seq,
             name="seq_fc",
-            size=hidden_size,
+            size=self.hidden_size,
             bias_attr=False,
             act=None,
             num_flatten_dims=2,
@@ -171,7 +155,7 @@ class Model(ModelBase):
                 low=-stdv, high=stdv)))  # [batch_size, seq_max, h]
         last_fc = layers.fc(input=last,
                             name="last_fc",
-                            size=hidden_size,
+                            size=self.hidden_size,
                             bias_attr=False,
                             act=None,
                             num_flatten_dims=1,
@@ -184,7 +168,7 @@ class Model(ModelBase):
         add = layers.elementwise_add(seq_fc_t,
                                      last_fc)  # [seq_max, batch_size, h]
         b = layers.create_parameter(
-            shape=[hidden_size],
+            shape=[self.hidden_size],
             dtype='float32',
             default_initializer=fluid.initializer.Constant(value=0.0))  # [h]
         add = layers.elementwise_add(add, b)  # [seq_max, batch_size, h]
@@ -202,7 +186,7 @@ class Model(ModelBase):
             bias_attr=False,
             param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                 low=-stdv, high=stdv)))  # [batch_size, seq_max, 1]
-        weight *= self.mask
+        weight *= inputs[5]
         weight_mask = layers.elementwise_mul(
             seq, weight, axis=0)  # [batch_size, seq_max, h]
         global_attention = layers.reduce_sum(
@@ -213,7 +197,7 @@ class Model(ModelBase):
         final_attention_fc = layers.fc(
             input=final_attention,
             name="final_attention_fc",
-            size=hidden_size,
+            size=self.hidden_size,
             bias_attr=False,
             act=None,
             param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
@@ -225,7 +209,7 @@ class Model(ModelBase):
         #     dtype="int64",
         #     persistable=True,
         #     name="all_vocab")
-        all_vocab = np.arange(1, items_num).reshape((-1)).astype('int32')
+        all_vocab = np.arange(1, self.dict_size).reshape((-1)).astype('int32')
         all_vocab = fluid.layers.cast(
             x=fluid.layers.assign(all_vocab), dtype='int64')
@@ -235,63 +219,34 @@ class Model(ModelBase):
                 name="emb",
                 initializer=fluid.initializer.Uniform(
                     low=-stdv, high=stdv)),
-            size=[items_num, hidden_size])  # [all_vocab, h]
+            size=[self.dict_size, self.hidden_size])  # [all_vocab, h]
 
         logits = layers.matmul(
             x=final_attention_fc, y=all_emb,
             transpose_y=True)  # [batch_size, all_vocab]
         softmax = layers.softmax_with_cross_entropy(
-            logits=logits, label=self.label)  # [batch_size, 1]
+            logits=logits, label=inputs[6])  # [batch_size, 1]
         self.loss = layers.reduce_mean(softmax)  # [1]
-        self.acc = layers.accuracy(input=logits, label=self.label, k=20)
-
-    def avg_loss(self):
-        self._cost = self.loss
+        self.acc = layers.accuracy(input=logits, label=inputs[6], k=20)
 
-    def metrics(self):
-        self._metrics["LOSS"] = self.loss
-        self._metrics["train_acc"] = self.acc
-
-    def train_net(self):
-        self.train_input()
-        self.net(self.items_num, self.hidden_size, self.step,
-                 self.train_batch_size)
-        self.avg_loss()
-        self.metrics()
+        self._cost = self.loss
+        if is_infer:
+            self._infer_results['acc'] = self.acc
+            self._infer_results['loss'] = self.loss
+            return
+
+        self._metrics["LOSS"] = self.loss
+        self._metrics["train_acc"] = self.acc
 
     def optimizer(self):
-        learning_rate = envs.get_global_env("hyper_parameters.learning_rate",
-                                            None, self._namespace)
-        step_per_epoch = self.ins_num // self.train_batch_size
-        decay_steps = envs.get_global_env("hyper_parameters.decay_steps",
-                                          None, self._namespace)
-        decay_rate = envs.get_global_env("hyper_parameters.decay_rate", None,
-                                         self._namespace)
-        l2 = envs.get_global_env("hyper_parameters.l2", None, self._namespace)
+        step_per_epoch = self.corpus_size // self.train_batch_size
         optimizer = fluid.optimizer.Adam(
             learning_rate=fluid.layers.exponential_decay(
-                learning_rate=learning_rate,
-                decay_steps=decay_steps * step_per_epoch,
-                decay_rate=decay_rate),
+                learning_rate=self.learning_rate,
+                decay_steps=self.decay_steps * step_per_epoch,
+                decay_rate=self.decay_rate),
             regularization=fluid.regularizer.L2DecayRegularizer(
-                regularization_coeff=l2))
+                regularization_coeff=self.l2))
 
         return optimizer
-
-    def infer_input(self):
-        self._reader_namespace = "evaluate.reader"
-        res = self.input(self.evaluate_batch_size)
-        self._infer_data_var = res
-
-        self._infer_data_loader = fluid.io.DataLoader.from_generator(
-            feed_list=self._infer_data_var,
-            capacity=64,
-            use_double_buffer=False,
-            iterable=False)
-
-    def infer_net(self):
-        self.infer_input()
-        self.net(self.items_num, self.hidden_size, self.step,
-                 self.evaluate_batch_size)
-        self._infer_results['acc'] = self.acc
-        self._infer_results['loss'] = self.loss
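One consequence of this rewrite: step_per_epoch is now derived from the configured corpus_size rather than the instance count read from config.txt, so the decay schedule depends only on config.yaml. A sketch of the arithmetic with the values from the GNN config above, mirroring fluid.layers.exponential_decay with its default staircase=False (lr * decay_rate ** (step / decay_steps)):

    corpus_size = 719470      # hyper_parameters.corpus_size
    train_batch_size = 100    # dataset.dataset_train.batch_size
    learning_rate = 0.001     # hyper_parameters.optimizer.learning_rate
    decay_steps = 3           # hyper_parameters.optimizer.decay_steps
    decay_rate = 0.1          # hyper_parameters.optimizer.decay_rate

    step_per_epoch = corpus_size // train_batch_size  # 7194 mini-batches
    total_decay_steps = decay_steps * step_per_epoch  # 21582
    for n in (0, total_decay_steps, 2 * total_decay_steps):
        lr = learning_rate * decay_rate ** (n / float(total_decay_steps))
        print(n, lr)  # 0.001, then 0.0001, then 0.00001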
models/recall/gnn/reader.py
@@ -23,9 +23,7 @@ from paddlerec.core.utils import envs
 
 
 class TrainReader(Reader):
     def init(self):
-        self.batch_size = envs.get_global_env("batch_size", None,
-                                              "train.reader")
-
+        self.batch_size = envs.get_global_env("dataset.dataset_train.batch_size")
         self.input = []
         self.length = None