s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 0cd3d461 (unverified)
Authored May 23, 2018 by daminglu; committed by GitHub on May 23, 2018

Recommend sys new api (#10894)

Parent: d406c76a
Showing 3 changed files with 273 additions and 0 deletions (+273 −0)
python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt  (+1 −0)
python/paddle/fluid/tests/book/high-level-api/recommender_system/CMakeLists.txt  (+7 −0)
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py  (+265 −0)
python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt

@@ -10,3 +10,4 @@ add_subdirectory(fit_a_line)
 add_subdirectory(recognize_digits)
 add_subdirectory(image_classification)
 add_subdirectory(understand_sentiment)
+add_subdirectory(recommender_system)
python/paddle/fluid/tests/book/high-level-api/recommender_system/CMakeLists.txt (new file, mode 100644)

file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")

# default test
foreach(src ${TEST_OPS})
    py_test(${src} SRCS ${src}.py)
endforeach()
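As an aside, the glob-and-strip pattern above is what turns each test_*.py in this directory into its own py_test target. A minimal Python sketch of the name computation it performs (illustrative only, not part of the commit):

from pathlib import Path

# mirrors file(GLOB ... "test_*.py") plus string(REPLACE ".py" "" ...) above
test_ops = [p.stem for p in Path(".").glob("test_*.py")]
print(test_ops)
# in this directory the glob matches only test_recommender_system_newapi.py,
# so CMake registers py_test(test_recommender_system_newapi SRCS test_recommender_system_newapi.py)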
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py (new file, mode 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import sys
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets

IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256


def get_usr_combined_features():
    # FIXME(dzh): old API integer_value(10) may have range check.
    # currently we don't have a user-configured check.
    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

    uid = layers.data(name='user_id', shape=[1], dtype='int64')

    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)

    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2

    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')

    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)

    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)

    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")

    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')

    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1

    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")

    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)

    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features


def get_mov_combined_features():
    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1

    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')

    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)

    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())

    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)

    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())

    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)

    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32],
        is_sparse=IS_SPARSE)

    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh): need tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features


def inference_program():
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    inference = layers.cos_sim(
        X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    return scale_infer


def train_program():
    scale_infer = inference_program()

    label = layers.data(name='score', shape=[1], dtype='float32')
    square_cost = layers.square_error_cost(input=scale_infer, label=label)
    avg_cost = layers.mean(square_cost)

    return [avg_cost, scale_infer]


def train(use_cuda, train_program, save_path):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.SGD(learning_rate=0.2)

    trainer = fluid.Trainer(
        train_func=train_program, place=place, optimizer=optimizer)

    feed_order = [
        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
        'movie_title', 'score'
    ]

    def event_handler(event):
        if isinstance(event, fluid.EndStepEvent):
            test_reader = paddle.batch(
                paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
            avg_cost_set = trainer.test(
                reader=test_reader, feed_order=feed_order)

            # get avg cost
            avg_cost = np.array(avg_cost_set).mean()

            print("avg_cost: %s" % avg_cost)

            if float(avg_cost) < 4:  # Smaller value to increase CI speed
                trainer.save_params(save_path)
                trainer.stop()
            else:
                print('BatchID {0}, Test Loss {1:0.2}'.format(
                    event.epoch + 1, float(avg_cost)))
                if math.isnan(float(avg_cost)):
                    sys.exit("got NaN loss, training failed.")

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=[
            'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id',
            'category_id', 'movie_title', 'score'
        ])


def infer(use_cuda, inference_program, save_path):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        inference_program, param_path=save_path, place=place)

    def create_lod_tensor(data, lod=None):
        tensor = fluid.LoDTensor()
        if lod is None:
            # Tensor, the shape is [batch_size, 1]
            index = 0
            lod_0 = [index]
            for l in range(len(data)):
                index += 1
                lod_0.append(index)
            lod = [lod_0]

        tensor.set_lod(lod)

        flattened_data = np.concatenate(data, axis=0).astype("int64")
        flattened_data = flattened_data.reshape([len(flattened_data), 1])
        tensor.set(flattened_data, place)
        return tensor

    # Generate a random input for inference
    user_id = create_lod_tensor([[1]])
    gender_id = create_lod_tensor([[1]])
    age_id = create_lod_tensor([[0]])
    job_id = create_lod_tensor([[10]])
    movie_id = create_lod_tensor([[783]])
    category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]])
    movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]],
                                    [[0, 5]])

    results = inferencer.infer(
        {
            'user_id': user_id,
            'gender_id': gender_id,
            'age_id': age_id,
            'job_id': job_id,
            'movie_id': movie_id,
            'category_id': category_id,
            'movie_title': movie_title
        },
        return_numpy=False)

    print("infer results: ", np.array(results[0]))


def main(use_cuda):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "recommender_system.inference.model"
    train(use_cuda=use_cuda, train_program=train_program, save_path=save_path)
    infer(
        use_cuda=use_cuda,
        inference_program=inference_program,
        save_path=save_path)


if __name__ == '__main__':
    main(USE_GPU)
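For intuition, inference_program() scores a user-movie pair as the cosine similarity of the two 200-d tower outputs, scaled by 5.0 to map onto the MovieLens 1-5 rating range. A small numpy sketch of just that scoring step (random vectors stand in for the tower outputs, for illustration only):

import numpy as np

# stand-in 200-d tower outputs (random, for illustration only)
rng = np.random.default_rng(0)
usr = rng.standard_normal(200)
mov = rng.standard_normal(200)

# layers.cos_sim(X=..., Y=...) followed by layers.scale(x=..., scale=5.0)
cosine = float(usr @ mov) / float(np.linalg.norm(usr) * np.linalg.norm(mov))
score = 5.0 * cosine
print(score)  # a predicted rating in roughly [-5, 5]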
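The create_lod_tensor helper builds each input by hand from nested lists. As a worked example, the category_id tensor above (three category ids forming one sequence, LoD offsets [0, 3]) can be constructed directly with the same fluid calls the helper uses, assuming the 2018-era fluid build this test targets:

import numpy as np
import paddle.fluid as fluid  # assumes the same 2018-era fluid API used in the test

place = fluid.CPUPlace()

tensor = fluid.LoDTensor()
tensor.set_lod([[0, 3]])  # offsets: one sequence covering rows 0..3

# three category ids, flattened to shape [3, 1] exactly as the helper does
data = np.array([[10], [8], [9]], dtype="int64")
tensor.set(data, place)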