Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit c624417c
Authored Dec 14, 2018 by Qiao Longfei

change sparse mode to lazy mode

Parent: 4035e4ba
Showing 4 changed files with 18 additions and 18 deletions (+18 −18)
paddle/fluid/operators/optimizers/adam_op.cc              +1 −1
paddle/fluid/operators/optimizers/adam_op.h               +6 −6
python/paddle/fluid/optimizer.py                          +3 −3
python/paddle/fluid/tests/unittests/test_adam_op.py       +8 −8
paddle/fluid/operators/optimizers/adam_op.cc

@@ -111,7 +111,7 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker {
                    "Constant for numerical stability")
         .SetDefault(1.0e-8f);
-    AddAttr<bool>("sparse_mode",
+    AddAttr<bool>("lazy_mode",
                   "(bool, default false) "
                   "only update the parameter that has gradient in sparse update")
         .SetDefault(false);
paddle/fluid/operators/optimizers/adam_op.h

@@ -177,13 +177,13 @@ struct SparseAdamFunctor {
   const int64_t* rows_;
   int64_t row_numel_;
   int64_t row_count_;
-  bool sparse_mode_;
+  bool lazy_mode_;

   SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                     const T* beta2_pow, const T* mom1, T* mom1_out,
                     const T* mom2, T* mom2_out, const T* lr, const T* grad,
                     const T* param, T* param_out, const int64_t* rows,
-                    int64_t row_numel, int64_t row_count, bool sparse_mode)
+                    int64_t row_numel, int64_t row_count, bool lazy_mode)
       : beta1_(beta1),
         beta2_(beta2),
         epsilon_(epsilon),

@@ -200,7 +200,7 @@ struct SparseAdamFunctor {
         rows_(rows),
         row_numel_(row_numel),
         row_count_(row_count),
-        sparse_mode_(sparse_mode) {}
+        lazy_mode_(lazy_mode) {}

   inline HOSTDEVICE void adam_update(size_t i, T g) const {
     // The following code is the same as dense

@@ -245,7 +245,7 @@ class AdamOpKernel : public framework::OpKernel<T> {
     using paddle::framework::LoDTensor;
     using paddle::operators::detail::Ref;

-    bool sparse_mode = ctx.Attr<bool>("sparse_mode");
+    bool lazy_mode = ctx.Attr<bool>("lazy_mode");
     T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
     T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
     T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));

@@ -357,8 +357,8 @@ class AdamOpKernel : public framework::OpKernel<T> {
           mom2_out.template mutable_data<T>(ctx.GetPlace()),
           lr.template data<T>(), grad_data, param.template data<T>(),
           param_out.template mutable_data<T>(ctx.GetPlace()), rows, row_numel,
-          grad_merge.rows().size(), sparse_mode);
-      if (sparse_mode) {
+          grad_merge.rows().size(), lazy_mode);
+      if (lazy_mode) {
         size_t row_count = grad_merge.rows().size();
         for (size_t row_index = 0; row_index < row_count; ++row_index) {
           for (size_t offset = 0; offset < row_numel; ++offset) {
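The kernel hunks above are where the renamed flag actually changes behavior: with lazy_mode enabled, the sparse Adam path only walks the rows present in the merged sparse gradient, instead of applying the update to every row of the parameter. Below is a rough NumPy sketch of that difference in semantics, not Paddle's implementation; the function name, shapes, and the omission of bias correction are assumptions made for illustration.

    import numpy as np

    def sparse_adam_step(param, mom1, mom2, grad_rows, grad_values,
                         lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                         lazy_mode=False):
        # Illustrative sketch of one Adam step over a row-sparse gradient.
        # grad_rows:   indices of rows that actually carry a gradient
        # grad_values: gradient values for those rows, shape [len(grad_rows), row_numel]
        # Bias correction (beta1_pow / beta2_pow) is omitted to keep the sketch short.
        dense_grad = np.zeros_like(param)
        dense_grad[grad_rows] = grad_values

        if lazy_mode:
            # Lazy mode: update only the rows that appear in the sparse gradient.
            rows_to_update = np.asarray(grad_rows)
        else:
            # Otherwise every row is updated; rows without a gradient see g = 0,
            # so their moments still decay and the parameter still moves.
            rows_to_update = np.arange(param.shape[0])

        for r in rows_to_update:
            g = dense_grad[r]
            mom1[r] = beta1 * mom1[r] + (1 - beta1) * g
            mom2[r] = beta2 * mom2[r] + (1 - beta2) * g * g
            param[r] -= lr * mom1[r] / (np.sqrt(mom2[r]) + epsilon)
        return param, mom1, mom2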
python/paddle/fluid/optimizer.py

@@ -664,7 +664,7 @@ class AdamOptimizer(Optimizer):
                  epsilon=1e-8,
                  regularization=None,
                  name=None,
-                 sparse_mode=False):
+                 lazy_mode=False):
         assert learning_rate is not None
         assert beta1 is not None
         assert beta2 is not None

@@ -677,7 +677,7 @@ class AdamOptimizer(Optimizer):
         self._beta1 = beta1
         self._beta2 = beta2
         self._epsilon = epsilon
-        self._sparse_mode = sparse_mode
+        self._lazy_mode = lazy_mode

     def _create_accumulators(self, block, parameters):
         assert isinstance(block, framework.Block)

@@ -732,7 +732,7 @@ class AdamOptimizer(Optimizer):
                 "beta1": self._beta1,
                 "beta2": self._beta2,
                 "epsilon": self._epsilon,
-                "sparse_mode": self._sparse_mode
+                "lazy_mode": self._lazy_mode
             })

         return adam_op
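On the Python side the same rename shows up as a constructor keyword: AdamOptimizer now takes lazy_mode instead of sparse_mode and forwards it to the adam op's attrs. A minimal, hypothetical usage sketch against the fluid API of this period (the layer names and sizes are illustrative, not taken from this commit):

    import paddle.fluid as fluid

    # Row-sparse gradients come from sparse embedding lookups, which is the case
    # lazy_mode is meant for: only the looked-up rows get an Adam update.
    ids = fluid.layers.data(name="ids", shape=[1], dtype="int64")
    emb = fluid.layers.embedding(input=ids, size=[10000, 64], is_sparse=True)
    cost = fluid.layers.reduce_mean(emb)

    # The keyword renamed by this commit: lazy_mode (formerly sparse_mode).
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.001, lazy_mode=True)
    optimizer.minimize(cost)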
python/paddle/fluid/tests/unittests/test_adam_op.py

@@ -195,7 +195,7 @@ def adam_step(inputs, attributes):
 def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad,
-                     sparse_mode):
+                     lazy_mode):
     '''
     Simulate one step of the adam optimizer
     :param inputs: dict of inputs

@@ -231,7 +231,7 @@ def adam_step_sparse(inputs, attributes, height, rows, row_numel, np_grad,
 class TestSparseAdamOp(unittest.TestCase):
-    def setup(self, scope, place, sparse_mode):
+    def setup(self, scope, place, lazy_mode):
         beta1 = 0.78
         beta2 = 0.836
         epsilon = 1e-4

@@ -265,19 +265,19 @@ class TestSparseAdamOp(unittest.TestCase):
         param_out, mom1, mom2 = adam_step_sparse(self.dense_inputs, self.attrs,
                                                  height, rows, row_numel,
-                                                 np_array, sparse_mode)
+                                                 np_array, lazy_mode)
         self.outputs = {
             "ParamOut": param_out,
             "Moment1Out": mom1,
             "Moment2Out": mom2
         }

-    def check_with_place(self, place, sparse_mode):
+    def check_with_place(self, place, lazy_mode):
         scope = core.Scope()
-        self.setup(scope, place, sparse_mode)
+        self.setup(scope, place, lazy_mode)

         op_args = dict()
-        op_args['sparse_mode'] = sparse_mode
+        op_args['lazy_mode'] = lazy_mode
         for key, np_array in self.dense_inputs.items():
             var = scope.var(key).get_tensor()
             var.set(np_array, place)

@@ -313,8 +313,8 @@ class TestSparseAdamOp(unittest.TestCase):
         if core.is_compiled_with_cuda():
             places.append(core.CUDAPlace(0))
         for place in places:
-            for sparse_mode in (True, False):
-                self.check_with_place(place, sparse_mode)
+            for lazy_mode in (True, False):
+                self.check_with_place(place, lazy_mode)

 if __name__ == "__main__":
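The updated test now exercises both settings of lazy_mode for every available place. One way to run just this file with the stock unittest runner, assuming it is invoked from python/paddle/fluid/tests/unittests/, is sketched here:

    import unittest

    # Discover and run only test_adam_op.py; TestSparseAdamOp loops over
    # lazy_mode in (True, False) for CPU, and for CUDA when it is compiled in.
    suite = unittest.defaultTestLoader.discover(".", pattern="test_adam_op.py")
    unittest.TextTestRunner(verbosity=2).run(suite)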