Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit fd24ab47
Authored March 26, 2019 by Xin Pan
Parent: 1f89249a

    polish
    test=develop
Showing 6 changed files with 22 additions and 32 deletions (+22 / -32):

  paddle/fluid/pybind/pybind.cc                                     +1   -0
  python/paddle/fluid/framework.py                                  +4   -1
  python/paddle/fluid/imperative/tracer.py                          +2  -13
  python/paddle/fluid/initializer.py                                +1   -1
  python/paddle/fluid/optimizer.py                                  +2   -0
  python/paddle/fluid/tests/unittests/test_imperative_deepcf.py    +12  -17
paddle/fluid/pybind/pybind.cc

@@ -235,6 +235,7 @@ PYBIND11_MODULE(core, m) {
             self.forward_id_ = forward_id;
           },
           py::return_value_policy::reference)
+      .def_property_readonly("type", &imperative::OpBase::Type)
       .def_property(
           "backward_id",
           [](const imperative::OpBase &self) { return self.backward_id_; },
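For context, `def_property_readonly` surfaces a C++ getter as a plain read-only attribute on the Python side. A minimal pure-Python analogue of what the new binding exposes (a sketch with illustrative names, not Paddle code):

# Stand-in for the pybind11-exposed class: `type` is computed from the
# object's state and cannot be assigned, mirroring
# def_property_readonly("type", &imperative::OpBase::Type).
class OpBaseSketch(object):
    def __init__(self, op_type):
        self._type = op_type  # plays the role of imperative::OpBase::Type()

    @property
    def type(self):
        return self._type

op = OpBaseSketch('mul')
print(op.type)   # -> 'mul'
# Assigning op.type = 'add' would raise AttributeError, matching the
# read-only binding.

The framework.py change below is the consumer of this new attribute.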
python/paddle/fluid/framework.py

@@ -906,7 +906,10 @@ class Operator(object):
     @property
     def type(self):
-        return self.desc.type()
+        if _in_imperative_mode():
+            return self.iop.type
+        else:
+            return self.desc.type()
 
     def input(self, name):
         """
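The property now branches on the execution mode: imperative (dygraph) mode reads the traced C++ op, static-graph mode keeps reading the program desc. A self-contained sketch of that dispatch (stand-in classes only; `_in_imperative_mode()` is faked by a flag here, not Paddle code):

# Illustration of the Operator.type dispatch added above, with fake backends.
class _FakeIOp(object):          # stands in for the pybind-exposed OpBase
    type = 'mul'

class _FakeDesc(object):         # stands in for the static-graph OpDesc
    def type(self):
        return 'mul'

class OperatorSketch(object):
    def __init__(self, imperative):
        self._imperative = imperative   # plays the role of _in_imperative_mode()
        self.iop = _FakeIOp()
        self.desc = _FakeDesc()

    @property
    def type(self):
        # prefer the traced C++ op in imperative mode, fall back to the
        # desc in static-graph mode, exactly as in the diff above
        return self.iop.type if self._imperative else self.desc.type()

print(OperatorSketch(imperative=True).type)    # -> 'mul'
print(OperatorSketch(imperative=False).type)   # -> 'mul'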
python/paddle/fluid/imperative/tracer.py

@@ -14,9 +14,7 @@
 from __future__ import print_function
 
-import sys
 import six
-from six.moves import reduce
 from collections import defaultdict
 
 from paddle.fluid import core

@@ -51,16 +49,7 @@ class Tracer(core.Tracer):
     def trace_op(self, op, stop_gradient=False):
         # record op's trace id
         op.iop._trace_id = self._trace_id
 
-        """
-        all_input_stop_grads = True
-        for vars in op.inputs.values():
-            for v in vars:
-                sys.stderr.write('%s %s\n' % (v.name, v.stop_gradient))
-                all_input_stop_grads &= v.stop_gradient
-
-        stop_gradient = False if not stop_gradient else True
-        stop_gradient = all_input_stop_grads | stop_gradient
-        """
-
         backward_refs = self.trace(op.iop, op.inputs, op.outputs, op.attrs,
                                    framework._current_expected_place(),
                                    stop_gradient)

@@ -73,7 +62,7 @@ class Tracer(core.Tracer):
         if len(backward_refs) > 0:
             op.iop.register_backward_hooks(release_op)
 
-            # TODO(minqiyang): remove all inputs and outputs after seperate
+            # TODO(minqiyang): remove all inputs and outputs after separate
             # var and grad
             op.backward_refs = defaultdict(list)
             for k, v in six.iteritems(op.inputs):
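The deleted, commented-out block had experimented with deriving `stop_gradient` in Python from the inputs before handing it to the C++ tracer; after this cleanup that decision stays with `self.trace(...)`. For reference, a standalone sketch of the rule that removed block was probing (illustrative stand-in types, not the tracer API):

def derive_stop_gradient(inputs, stop_gradient=False):
    # Gradients stop when explicitly requested, or when every input
    # variable already has stop_gradient set.
    all_input_stop_grads = True
    for input_vars in inputs.values():
        for v in input_vars:
            all_input_stop_grads &= bool(v.stop_gradient)
    return all_input_stop_grads or stop_gradient

class _Var(object):              # minimal stand-in for a fluid Variable
    def __init__(self, stop_gradient):
        self.stop_gradient = stop_gradient

print(derive_stop_gradient({'X': [_Var(True)], 'Y': [_Var(True)]}))   # True
print(derive_stop_gradient({'X': [_Var(True)], 'Y': [_Var(False)]}))  # False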
python/paddle/fluid/initializer.py

@@ -212,7 +212,7 @@ class UniformInitializer(Initializer):
         if self._seed == 0:
             self._seed = block.program.random_seed
 
-        # to be compatible of fp16 initalizers
+        # to be compatible of fp16 initializers
         if var.dtype == VarDesc.VarType.FP16:
             out_dtype = VarDesc.VarType.FP32
             out_var = block.create_var(
python/paddle/fluid/optimizer.py

@@ -165,6 +165,8 @@ class Optimizer(object):
             name = self._name + "_" + name
         if (name in self._accumulators and
                 param.name in self._accumulators[name]):
+            if framework._in_imperative_mode():
+                return self._accumulators[name][param.name]
             raise Exception("Accumulator {} already exists for parameter {}".
                             format(name, param.name))
         if shape == None:
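The early return matters because in imperative (dygraph) mode the same Optimizer object is reused across steps, so `_add_accumulator` can legitimately be called again for a parameter whose accumulator (for example an Adam moment) already exists; reusing it preserves optimizer state instead of raising. A standalone sketch of that get-or-create behaviour (illustrative names, not Paddle internals):

# name -> {parameter name -> accumulator value}; plays the role of
# Optimizer._accumulators in the diff above.
_accumulators = {}

def add_accumulator(name, param_name, imperative_mode, fill=0.0):
    slot = _accumulators.setdefault(name, {})
    if param_name in slot:
        if imperative_mode:
            return slot[param_name]          # reuse state across steps
        raise Exception("Accumulator {} already exists for parameter {}"
                        .format(name, param_name))
    slot[param_name] = fill
    return slot[param_name]

add_accumulator('moment1', 'fc_0.w_0', imperative_mode=True)   # created
add_accumulator('moment1', 'fc_0.w_0', imperative_mode=True)   # reused, no error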
python/paddle/fluid/tests/unittests/test_imperative_deepcf.py

@@ -24,9 +24,11 @@ import paddle.fluid.core as core
 from test_imperative_base import new_program_scope
 from paddle.fluid.imperative.base import to_variable
 
+# Can use Amusic dataset as the DeepCF describes.
 DATA_PATH = os.environ.get('DATA_PATH', '')
-BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 256))
-NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 2))
+BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 128))
+NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
 NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))

@@ -92,18 +94,13 @@ class DeepCF(fluid.imperative.Layer):
         self._num_users = num_users
         self._num_items = num_items
         self._rating_matrix = self.create_parameter(
-            None,
+            fluid.ParamAttr(trainable=False),
             matrix.shape,
             matrix.dtype,
             is_bias=False,
             default_initializer=fluid.initializer.NumpyArrayInitializer(matrix))
         self._rating_matrix._stop_gradient = True
 
-        # self._user_emb = fluid.imperative.Embedding(self.full_name(),
-        #                                             [self._num_users, 256])
-        # self._item_emb = fluid.imperative.Embedding(self.full_name(),
-        #                                             [self._num_items, 256])
-
         self._mlp = MLP(self.full_name())
         self._dmf = DMF(self.full_name())
         self._match_fc = fluid.imperative.FC(self.full_name(), 1, act='sigmoid')

@@ -111,7 +108,6 @@ class DeepCF(fluid.imperative.Layer):
     def forward(self, users, items):
         # users_emb = self._user_emb(users)
         # items_emb = self._item_emb(items)
-        sys.stderr.write('forward: %s\n' % users._stop_gradient)
         users_emb = fluid.layers.gather(self._rating_matrix, users)
         items_emb = fluid.layers.gather(
             fluid.layers.transpose(self._rating_matrix, [1, 0]), items)

@@ -131,10 +127,10 @@ def get_data():
     user_ids = []
     item_ids = []
     labels = []
-    matrix = np.zeros([100, 1000], dtype=np.float32)
     NUM_USERS = 100
     NUM_ITEMS = 1000
+    matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32)
     for uid in range(NUM_USERS):
         for iid in range(NUM_ITEMS):
             label = float(random.randint(1, 6) == 1)

@@ -209,7 +205,7 @@ class TestImperativeDeepCF(unittest.TestCase):
         startup.random_seed = seed
         main = fluid.Program()
         main.random_seed = seed
-        """
+
         scope = fluid.core.Scope()
         with new_program_scope(main=main, startup=startup, scope=scope):
             users = fluid.layers.data('users', [1], dtype='int32')

@@ -240,17 +236,18 @@ class TestImperativeDeepCF(unittest.TestCase):
                         },
                         fetch_list=[loss])[0]
                     sys.stderr.write('static loss %s\n' % static_loss)
-        """
 
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
             deepcf = DeepCF('deepcf', num_users, num_items, matrix)
-            sys.stderr.write('matrix: %s\n' % deepcf._rating_matrix._numpy())
+            adam = fluid.optimizer.AdamOptimizer(0.01)
             for e in range(NUM_EPOCHES):
                 sys.stderr.write('epoch %d\n' % e)
                 for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
+                    if slice + BATCH_SIZE >= users_np.shape[0]:
+                        break
                     prediction = deepcf(
                         to_variable(users_np[slice:slice + BATCH_SIZE]),
                         to_variable(items_np[slice:slice + BATCH_SIZE]))

@@ -259,12 +256,10 @@ class TestImperativeDeepCF(unittest.TestCase):
                             to_variable(labels_np[
                                 slice:slice + BATCH_SIZE])))
                     loss._backward()
-                    adam = fluid.optimizer.AdamOptimizer(0.01)
                     adam.minimize(loss)
                     deepcf.clear_gradients()
                     dy_loss = loss._numpy()
-                    sys.stderr.write('dynamic loss: %s\n' % dy_loss)
+                    sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
 
-            sys.stderr.write('matrix: %s\n' % deepcf._rating_matrix._numpy())
         self.assertEqual(static_loss, dy_loss)
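With the triple-quoted wrapper removed the static-graph baseline runs again, the Adam optimizer is hoisted out of the batch loop, and both loops now guard against a trailing partial batch. A small numpy-only sketch of that slicing pattern (illustrative sizes, not the full test):

import numpy as np

BATCH_SIZE = 128
NUM_BATCHES = 5
users_np = np.arange(500, dtype=np.int32)        # fewer rows than 128 * 5

for start in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):
    if start + BATCH_SIZE >= users_np.shape[0]:
        break                                    # skip the incomplete batch
    batch = users_np[start:start + BATCH_SIZE]
    # the test feeds such a slice via to_variable(...) into the DeepCF model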